query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4-10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Tell the RoboVac to clean a single room. | def start_single_room_clean(self):
command = _build_robovac_command(RobovacModes.WORK, RobovacCommands.SINGLE_ROOM_CLEAN)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def clear(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n with SQLCursor(self.db) as cur:\n cur.execute('UPDATE govt_info SET officer = Null;')\n await ctx.message.channel.send(\n 'Successfully cleared all officers from all positions in the SQLite table.')\n else:\n await ctx.message.channel.send('Hey! You do not have permission to do that.')",
"def remove_room_from_current(self, login, room):\n pass",
"def clear(self):\r\n self.room_value.set('')",
"def delete_room(context):\n\n room = context.get('spark.room')\n bearer = context.get('spark.CISCO_SPARK_PLUMBERY_BOT')\n\n print(\"Deleting Cisco Spark room '{}'\".format(room))\n\n url = 'https://api.ciscospark.com/v1/rooms'\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.get(url=url, headers=headers)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n actual = False\n for item in response.json()['items']:\n\n if room in item['title']:\n print(\"- found it\")\n print(\"- DELETING IT\")\n\n url = 'https://api.ciscospark.com/v1/rooms/{}'.format(item['id'])\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.delete(url=url, headers=headers)\n\n if response.status_code != 204:\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n actual = True\n\n if actual:\n print(\"- room will be re-created in Cisco Spark\")\n else:\n print(\"- no room with this name yet\")\n\n context.set('spark.room_id', None)",
"def close_room(room):\n return request.namespace.close_room(room)",
"def deallocate_person(self, person, room_type):\n if room_type == \"OfficeSpace\":\n room = person.office\n self.offices[room]['room'].deallocate_room_space()\n occupant = person.name + \"\\t\" + person.email\n self.offices[room]['occupants'].remove(occupant)\n person.set_office(None)\n elif room_type == \"LivingSpace\":\n room = person.living_space\n occupant = person.name + \"\\t\" + person.email\n self.living_spaces[room]['room'].deallocate_room_space()\n self.living_spaces[room]['occupants'].remove(occupant)\n person.set_livingspace(None)",
"def __call__(self,camp):\n if self.npc in camp.party:\n camp.assign_pilot_to_mecha(self.npc,None)\n camp.party.remove(self.npc)\n for mek in list(camp.party):\n if hasattr(mek,\"owner\") and mek.owner is self.npc:\n camp.party.remove(mek)",
"def clear_mission(self):\n cmds = self.vehicle.commands\n self.vehicle.commands.clear()\n self.vehicle.flush()\n\n # After clearing the mission, we MUST re-download the mission from the \n # vehicle before vehicle.commands can be used again.\n # See https://github.com/dronekit/dronekit-python/issues/230 for \n # reasoning.\n self.download_mission()",
"def room_delete(room_id):\n room = Room.query.get(room_id)\n if room is None:\n abort(404, 'room not found')\n\n get_db().delete(room)\n get_db().commit()\n\n return '', 204",
"async def clean(self, ctx):\n pass",
"def leave_room(room):\n return request.namespace.leave_room(room)",
"def handle_delete_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n roomname = lobby_command.split()[1]\n msg = f\"Handling room deletion of {roomname} by {user}\"\n print(msg)\n for _room in self.rooms:\n if _room.name == roomname and user in _room.room_attrbts['admins']:\n msg = f\"Room {roomname} is being deleted by admin {user}\"\n self.rooms.remove(_room)\n self.log_and_send(client_socket, msg)\n return\n msg = f\"Room {roomname} was not found or user is not permitted to delete\"\n self.log_and_send(client_socket, msg)",
"async def on_room_deinit(self, room_obj):\n pass",
"async def quit_room(self, label):\n room = await self.get_room(label)\n if not room.is_ready:\n await self.exit_room(room)\n self.rooms.discard(label)\n await self.channel_layer.discard(\n room.group_name,\n self.channel_name\n )\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.quit',\n 'label': label,\n 'username': self.user.username,\n 'title': room.name,\n }\n )\n else:\n self.send_json(\n return_value(\n ACTION_DENIED,\n label,\n TO_ME,\n MSG_LEAVE,\n NO_MESSAGE\n )\n )",
"def func(self):\n char = self.character\n clothing = char.search(self.args, candidates=char.contents)\n if not clothing:\n return\n if not clothing.db.worn:\n char.msg(\"You're not wearing that!\")\n return\n if clothing.db.covered_by:\n char.msg(\"You have to take off %s first.\" % clothing.db.covered_by.name)\n return\n clothing.remove(char)",
"def clean(self):\n self._raw_execute(\"clean\", {\"job_id\": self.job_id})",
"def clear_room_messages(group, room):\n Message.objects.filter(room__exact=room).delete()\n channel_layer = get_channel_layer()\n async_to_sync(channel_layer.group_send)(group, {\n 'type': 'publish',\n 'payload': {\n 'type': 'app/fetchNotice',\n 'payload': 'Clear process was succeeded.',\n },\n })\n async_to_sync(channel_layer.group_send)(group, {\n 'type': 'publish',\n 'payload': {\n 'type': 'message/initRoom',\n 'payload': room,\n },\n })",
"def clean(self):\r\n self.roadrunnerModel = None\r\n return self",
"def delete(room_id):\n\n entry = Room.objects.filter(room_id=room_id).first()\n if entry is not None:\n entry.delete()\n\n entries = Players.objects.filter(room_id=room_id)\n if entries.count():\n entries.delete()\n\n round.dialog.delete_rounds(room_id=room_id, called_from=__path__+\":\"+utils.fname())",
"def die(self):\n print(\"Company\", self.ticker, \"left the universe\")\n del self\n Company.population -= 1",
"def delete(self, room_id):\n # Check for Authorization header in the form of \"Bearer <token>\"\n if \"Authorization\" not in request.headers:\n raise AuthorizationError(\"No password specified\")\n temp_pass = request.headers.get(\"Authorization\").split(\" \")[1]\n\n room = redis_store.get(room_id)\n if room is not None:\n room = loads(room)\n\n if room.get('delete_password') == temp_pass:\n redis_store.delete(room_id)\n return {'success': True}\n else:\n raise AuthorizationError(\"Wrong one time password for host.\")\n else:\n # raise 404\n raise ResourceDoesNotExist('Resource not found.')",
"def delete_vehicles(world):\n actors = world.get_actors()\n for actor in actors:\n # if actor.type_id == vehicle.*\n id = actor.type_id\n actor_type = id.split(\".\")\n\n # destroy vehicle\n if actor_type[0] == \"vehicle\":\n actor.destroy()\n print(\"vehicles are deleted\")",
"def func(self):\n if not self.args:\n self.msg(\"Roomtitle cleared.\")\n self.caller.attributes.remove(\"room_title\")\n return\n self.caller.db.room_title = self.args\n self.msg(\"Your roomtitle set to %s {w({n%s{w){n\" % (self.caller, self.args))\n self.mark_command_used()",
"def unset(bot, update, chat_data):\n if 'job' not in chat_data:\n update.message.reply_text('Sem notificacoes ativadas')\n return\n\n job = chat_data['job']\n job.schedule_removal()\n del chat_data['job']\n check = emojize(\":white_check_mark:\", use_aliases=True)\n update.message.reply_text('Notificacao cancelada com sucesso'+check+'')",
"def unclaim(self, job, owner):\n raise NotImplementedError()",
"def chamber_room():\n cont = request.get_json()\n if cont is not None:\n trigger(command_string=cont['obj'])\n return jsonify({\"SUCCESS\": True})",
"async def done(self, ctx: Context, arg):\n s = ctx.message.author\n\n q = getQueue(ctx.guild, arg)\n if arg is \"k\":\n name = \"kitchen\"\n elif arg is \"b\":\n name = \"bathroom\"\n else:\n raise Exception(\"Incorrect parameters\")\n\n if s in q:\n q.remove(s)\n logging.info('{0} remove {1}'.format(ctx.guild, s))\n await ctx.send(s.mention + ' is no longer in the ' + name)\n else:\n await ctx.send(s.mention + ' is not in the ' + name)",
"def die(self):\n self.pjs.bombermen.remove(self)\n for block in self.physics.blocks[self.stype]:\n if block == self.rects[0]:\n self.physics.blocks[self.stype].remove(block)",
"def ResetRoomba(self):\n self.roomba = pyrobot.Roomba()\n self.sensors = pyrobot.RoombaSensors(self.roomba)\n self.roomba.sci.Wake()\n self.roomba.Control()",
"def death(self):\n logging.debug(\"Wolf died.\")\n self.pack = False\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)"
]
| [
"0.5906659",
"0.59003526",
"0.5829769",
"0.57369155",
"0.55835515",
"0.5559878",
"0.55521303",
"0.54984415",
"0.54916006",
"0.542621",
"0.542105",
"0.5406405",
"0.5371466",
"0.5371127",
"0.5288914",
"0.52766216",
"0.52469695",
"0.5244343",
"0.5212713",
"0.520392",
"0.520318",
"0.51931226",
"0.5178705",
"0.51623833",
"0.51444274",
"0.5126194",
"0.5120503",
"0.5095109",
"0.5094021",
"0.5089477"
]
| 0.6597308 | 0 |
Tell the RoboVac to stop cleaning. The RoboVac will not return to its charging base. | def stop(self):
command = _build_robovac_command(RobovacModes.WORK, RobovacCommands.STOP_CLEAN)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopclean(self):\n raise Exception(\"Not implemented\")",
"def stop(self):\r\n self.terminating = True",
"def stop(self):\n print(\"Stopping accessory.\")",
"def stop(self):\n self._current_mc_goal = None\n self._mc_goals = []\n self._position_control_client.cancel_all_goals()\n action = Stop()\n self._velocity_control_client(pickle.dumps(action))",
"def stop(self, force=False):\n pass",
"async def async_stop(self, **kwargs: Any) -> None:\n await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))",
"def stop(self):\r\n self.running = False",
"def stop(self):\r\n self.running = False",
"def stop(self, **kwargs):\n self.turn_off()",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def _gracefully_stop(self):\n pass",
"def foreceStop(self):\n self.__success = False\n self.stop()",
"def Stop(self) :\n\t\t...",
"def stop_driving(self):\n\t\tprint(\"Stopping Red Rover..\")\n\t\tself.actuator_pub.publish(self.actuator_stop) # stops driving\n\t\tself.throttle_pub.publish(self.throttle_home) # idles engine\n\t\tprint(\"Red Rover stopped.\")\n\t\treturn",
"def stop(self) -> None:\n ...",
"def stop(self):\n self._stop_flag = True",
"def stop_driving(self):\n\n self.velocity = const.Driving.STOP_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n if self.drive_thread is not None:\n self.drive_thread.stop()\n self.drive_thread = None",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass"
]
| [
"0.7290178",
"0.69505924",
"0.69007415",
"0.67673147",
"0.674806",
"0.67433226",
"0.67431015",
"0.67431015",
"0.67406785",
"0.67222804",
"0.67222804",
"0.67222804",
"0.67222804",
"0.67222804",
"0.6720151",
"0.671741",
"0.66967374",
"0.6682695",
"0.667672",
"0.6624697",
"0.6622165",
"0.6619484",
"0.6619484",
"0.66081697",
"0.66081697",
"0.66081697",
"0.66081697",
"0.66081697",
"0.66081697",
"0.66081697"
]
| 0.8032583 | 0 |
Start the 'find me' mode. The RoboVac will repeatedly play a chime. | def start_find_me(self):
command = _build_robovac_command(RobovacModes.FIND_ME, RobovacCommands.START_RING)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n global repeat\n regime = collect()\n start = int(raw_input(\"Which line of the exercise script would you like to begin with? \")) - 1\n regime = regime[start:]\n say(\"Ready?\")\n time.sleep(1)\n for exercise in regime:\n coach(exercise[:-1])\n while repeat:\n repeat = False\n coach(exercise[:-1])\n say(\"Session complete.\")",
"async def start_again(msg: types.Message):\n await bot.send_message(msg.chat.id, \"Search for another movie?\", reply_markup=types.ReplyKeyboardRemove())",
"def run(self):\n print \"running presence detection\"\n r = rospy.Rate(10)\n time.sleep(2)\n\n while not rospy.is_shutdown():\n if self.running:\n self.find_new_people()\n self.follow_people()\n\n r.sleep()",
"def stop_find_me(self):\n command = _build_robovac_command(RobovacModes.FIND_ME, RobovacCommands.STOP_RING)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def startKuri(self):\n if self.option == 'c':\n self.useChat()\n elif self.option == 's':\n self.useSpeech()",
"def run_it():\n initialize()\n parser = get_parser()\n args = None\n first_parse = True\n while(True):\n if first_parse is True:\n first_parse = False\n args = parser.parse_args()\n \n else:\n # print(textwrap.dedent(\n # '''\\\n # Search again like in the beginning.\n # -- You can either choose best rated or list mode.\n # -- This time, you can insert the search string without double quotes.\n # Remember the list mode options!\n # 0: torrent project.\n # 1: the pirate bay.\n # 2: 1337x.\n # 3: eztv.\n # 4: limetorrents.\n # 5: isohunt.\n # '''))\n sys.exit(0)\n print('Or.. if you want to exit just write \"' +\n Colors.LRED + 'Q' + Colors.ENDC + '\" or \"' +\n Colors.LRED + 'q' + Colors.ENDC + '\".')\n input_parse = input('>> ').replace(\"'\", \"\").replace('\"', '')\n if input_parse in ['Q', 'q']:\n sys.exit(1)\n\n args = parser.parse_args(input_parse.split(' ', 2))\n \n if args.str_search.strip() == \"\":\n print('Please insert an appropiate non-empty string.')\n else:\n args.str_search = args.str_search.replace('_',' ').replace(\"'\",'')\n\n movieName = args.str_search\n #print(args.str_search)\n auto = AutoPy(*insert(args))\n auto.movieName = movieName\n auto.get_content()\n auto.select_torrent()\n auto.download_torrent()",
"def nukePlay():\n nuke.activeViewer().play(1)",
"def use_slime(slime):\n\n while True:\n activity = input(\"what would you like to do with your slime? \")\n\n slime.do(activity.lower())",
"def start(self):\n\t\tcv2.waitKey(1)\n\t\ttext, _ = self.parse_response(self.sent_text())\n\t\tprint text\n\t\tself.speak(text)\n\t\twhile(True):\n\t\t\tuser_input = self.get_user_text_input()\n\t\t\tresponse = self.sent_text(message = user_input)\n\t\t\ttext, intent = self.parse_response(response)\n\n\t\t\tif response['output'].get('query') is not None:\n\t\t\t\tquery = str(response['output']['query'])\n\t\t\t\tself.speak('Looking for ' + query) \n\t\t\t\tself.speak('This might take a while')\n\t\t\t\tfound, image = process_video.loop_through_frames(label = query)\n\t\t\t\tif found:\n\t\t\t\t\tprint text\n\t\t\t\t\tself.speak(text)\n\t\t\t\t\tcv2.imshow(\"Here it is!\", image)\n\t\t\t\t\tcv2.waitKey()\n\t\t\t\telse:\n\t\t\t\t\tself.speak(\"I am sorry, I could not find what you were looking for\")\n\t\t\t\t\t\n\t\t\t\treturn\n\t\t\tself.speak(text)\n\t\t\t#if intent == 'Lost':\n\t\t\t#\tkey = response['entities'] \n\t\t\t#\tprint \"I am looking for: \" + key\n\t\t\tprint text",
"def play_game(self):\r\n\r\n print('Welcome to a game of Concentration!!')\r\n if self.who_goes_first():\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n\r\n while True:\r\n if self.match:\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n self.check_game_end()",
"def play_game():\n pass",
"def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())",
"def play(self):\n start = time.time()\n self.search(0)\n end = time.time()\n\n print \"number of solutions: \" + str(self.count) + \"\\n\"\n print \"calculation time (sec): \" + str(end - start)",
"def play(self, pathFindingStategy: PathFindingStrategy) -> None:",
"def play(self):\r\n self.perform_strategy()",
"def run(self):\n # for running indefinitely if 'watch' is passed\n if self._arguments.watch:\n while True:\n self.watch(self.main(), int(self._arguments.watch))\n else:\n self.main()",
"def start(self):\n while True:\n #requests.get(\"http://localhost:8080/clear\")\n if use_launch_phrase:\n recognizer, audio = self.speech.listen_for_audio()\n if self.speech.is_call_to_action(recognizer, audio):\n self.__acknowledge_action()\n self.decide_action()\n else:\n self.decide_action()",
"def play(self):\n pass",
"def start(self, world):\n self.sense(world)",
"def run_im_bored():\n \n greet_user()\n \n bored = True\n \n while bored:\n generate_suggestion()\n bored = ask_to_continue()",
"async def async_locate(self, **kwargs: Any) -> None:\n await self._vacuum_bot.execute_command(PlaySound())",
"def forever(self):\n\tglobal message\n\tmessage=-1\n\tyield from bot.change_presence(game=discord.Game(type=1,name='Type \\\"!music start\\\" to start music'))",
"def start(self):\n while True:\n requests.get(\"http://localhost:8080/clear\") #clearing the screen on the web browser\n speech=\"Welcome to Smart Mirror !!\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % speech) # calling the text to appear on the browser\n self.speech.synthesize_text(\"hello\"+speech) #synthesizing the text into speech\n speech1=\"Say The launch Phrase .\" #asking the user to say the lauch phrase\n self.speech.synthesize_text(speech1) #speaking of the above line,\n if self.vision.recognize_face(): #checking if\n print \"Face Found\"\t\t\t#the person is infront of camera\n if use_launch_phrase:\t\t\t#checking whether to use the launch phrase or not\n recognizer, audio = self.speech.listen_for_audio()\t\t#initializing\n if self.speech.is_call_to_action(recognizer, audio):\t#checking if the audio is recognized\n self.__acknowledge_action()\t\t\t#if it is recognized take action\n self.decide_action()\t\t\t#deciding which action to be taken\n else:\n self.decide_action()\t\t\t#printing the else part",
"def play(self):\n print('Playing game...')",
"def play(self):\n self.strategy(self)",
"def auto_play(self):\n raise NotImplementedError(self)",
"def start_game(self):\n\n\t\tpass",
"def start(update, context):\n update.message.reply_text('Hi! Glad you found me. Now I am not so lonely and have another friend. Are you ready to play?')",
"def play(self):\n self._play(self._move_person)",
"def run(self):\n # Let the user know what to do\n self.catchall()\n \n while(True):\n try:\n self.outfd(self.prompt)\n line = self.readline()\n # Only save the history if there is something to save\n if(len(line) > 0):\n self.log.append(line)\n self.cli.findMatch(line)\n\n except Exception as e:\n out.err('!! Error: %s\\n%s\\n' % (str(e), traceback.format_exc()))"
]
| [
"0.59182197",
"0.5831545",
"0.5824273",
"0.5812982",
"0.581012",
"0.579923",
"0.5789638",
"0.572785",
"0.5726542",
"0.57101417",
"0.56072944",
"0.5601628",
"0.55859745",
"0.555688",
"0.55301493",
"0.54577565",
"0.5447385",
"0.5443759",
"0.54365546",
"0.5422701",
"0.54151183",
"0.53749084",
"0.53682363",
"0.5367637",
"0.53509927",
"0.53192925",
"0.53138024",
"0.529917",
"0.5293095",
"0.5273207"
]
| 0.7234999 | 0 |
Stop the 'find me' mode. | def stop_find_me(self):
command = _build_robovac_command(RobovacModes.FIND_ME, RobovacCommands.STOP_RING)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __stopSearch(self):\n self.__cancelSearch = True",
"def stop_searching(self):\n self.searching = False\n self.receive_msg(\"No one is online :(\", keyboard=True)",
"def continue_search( self ):\n return True;",
"def stop(self):\n self._should_run = False",
"def stop(self):\n self._run = False",
"def deactivate(self):\n if self.search_behavior:\n self.search_behavior.stop()",
"def stop(self, force=False):\n pass",
"def stop_check(self):\n pass",
"def stop() -> None:",
"async def cancel_search(self, *, delay=0, lifespan=math.inf):\n await self.add_output(\"|/cancelsearch\", delay=delay, lifespan=lifespan)",
"def abort_search(self):\n self._raise_not_supported()",
"def _stop(self):\n return True",
"def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()",
"def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()",
"def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()",
"def need_stop(self, path):",
"def cancel_search(job):\n job.cancel()",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stop(self):",
"def stop(self):",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def _stop(self):",
"def stopWatchingFileSystem(self) :\n\n self.continueWatchingFS = False",
"def stop(self):\r\n # productive\r\n profprint()\r\n self.logic.changeCursor(0)\r\n self.removeObservers()\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.validationNeedleButton.checked = 0"
]
| [
"0.6852051",
"0.65245056",
"0.6441062",
"0.60047376",
"0.59559894",
"0.59457433",
"0.59007186",
"0.58754736",
"0.57997805",
"0.5778056",
"0.5762534",
"0.57422465",
"0.5702933",
"0.5702933",
"0.5696768",
"0.56846666",
"0.56741786",
"0.5640267",
"0.5640267",
"0.56264794",
"0.56264794",
"0.5620886",
"0.5620886",
"0.5620886",
"0.5620886",
"0.5620886",
"0.5620886",
"0.559472",
"0.5563172",
"0.5554592"
]
| 0.7103667 | 0 |
Tell the RoboVac to use the standard fan speed. | def use_normal_speed(self):
command = _build_robovac_command(RobovacModes.SET_SPEED, RobovacCommands.SLOW_SPEED)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_fan_speed(self, value):\n self.parent.fancoolers.set_speed(value)",
"async def async_set_speed(self, value) -> None:\n await self.write_attributes_safe({\"fan_mode\": value})",
"def set_speed():\n pass",
"def setspeed(speed):\n if speed is None:\n click.echo(\"speed value is required\")\n raise click.Abort()\n\n for fan in range(_wrapper_get_num_fans()):\n status = _wrapper_set_fan_speed(fan, speed)\n if not status:\n click.echo(\"Failed\")\n sys.exit(1)\n\n click.echo(\"Successful\")",
"def set_fan_mode(self, fan_mode):\n self.api.device_control(self.obj_id, \"windSpeedSet\", {\"value\": fan_mode})",
"def set_machine_fan(self, val):\n if val > self.max_fan or val < 0:\n logger.error(\"/dev/ttyUSB{0} tried setting Fan speed {1}%\"\n .format(self.dev_id, val)\n )\n return False\n try:\n self._write(chr(self.incoming_fan))\n sleep(0.5)\n self._write(chr(val))\n return True\n except:\n logger.error(\"Cannot set Fan speed for /dev/ttyUSB{0}\"\n .format(self.dev_id)\n )\n return False",
"def use_max_speed(self):\n command = _build_robovac_command(RobovacModes.SET_SPEED, RobovacCommands.FAST_SPEED)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def speed(self, s=0):",
"def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)",
"def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)",
"def set_value(self, on_level):\n if on_level in FanSpeedRange.OFF:\n fan_speed = FanSpeed.OFF\n elif on_level in FanSpeedRange.LOW:\n fan_speed = FanSpeed.LOW\n elif on_level in FanSpeedRange.MEDIUM:\n fan_speed = FanSpeed.MEDIUM\n else:\n fan_speed = FanSpeed.HIGH\n self.value = fan_speed",
"def set_speed(self,speed):\n self.speed = speed",
"def set_speed(self, speed):\n self.device.set_speed(speed)\n return \"OK\"",
"def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)",
"def get_fan_speed(self):\n return self.__fan_speed",
"def set_speed(self, speed):\n self.speed = speed",
"async def set_fan_speed(self, speed):\n\n if speed not in self.fan_speeds:\n raise ValueError(f\"Invalid fan speed: {speed}\")\n keys = self._get_cmd_keys(CMD_STATE_WIND_STRENGTH)\n speed_value = self.model_info.enum_value(keys[2], DHumFanSpeed[speed].value)\n await self.set(keys[0], keys[1], key=keys[2], value=speed_value)",
"def speed(self, speed):\n self._speed = speed\n self._rotspeed = speed",
"def set_speed(self,speed):\n self.speed_p = speed",
"def setSpeedEngine1(speed: int):\n pass",
"def set_speed(rpm):\n ret = _LIB.fan_click_set_speed(rpm)\n if ret < 0:\n raise Exception(\"fan click set speed failed\")",
"async def test_set_speed(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n await helper.async_update(ServicesTypes.FAN, {CharacteristicsTypes.ON: 1})\n\n await hass.services.async_call(\n \"fan\",\n \"set_percentage\",\n {\"entity_id\": \"fan.testdevice\", \"percentage\": 100},\n blocking=True,\n )\n helper.async_assert_service_values(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ROTATION_SPEED: 100.0,\n },\n )\n\n await hass.services.async_call(\n \"fan\",\n \"set_percentage\",\n {\"entity_id\": \"fan.testdevice\", \"percentage\": 66},\n blocking=True,\n )\n helper.async_assert_service_values(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ROTATION_SPEED: 66.0,\n },\n )\n\n await hass.services.async_call(\n \"fan\",\n \"set_percentage\",\n {\"entity_id\": \"fan.testdevice\", \"percentage\": 33},\n blocking=True,\n )\n helper.async_assert_service_values(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ROTATION_SPEED: 33.0,\n },\n )\n\n await hass.services.async_call(\n \"fan\",\n \"set_percentage\",\n {\"entity_id\": \"fan.testdevice\", \"percentage\": 0},\n blocking=True,\n )\n helper.async_assert_service_values(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ON: 0,\n },\n )",
"def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)",
"def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1",
"def set_speed(self, speed: str) -> None:\n self.wink.set_state(True, speed)",
"def speed(self, speed: int, time: int = 0, /) -> None:",
"def speed(self, value: int, /) -> None:",
"def set_speed(self, speed):\n self._kernel.set_speed(float(speed))",
"def set_speed(self, v):\n self.v = v",
"def fan_speed(self) -> Optional[str]:\n return self._fan_speed"
]
| [
"0.76779467",
"0.736443",
"0.7363512",
"0.7304039",
"0.70294833",
"0.70021176",
"0.70012236",
"0.6961615",
"0.6888173",
"0.686537",
"0.67941016",
"0.6766449",
"0.67494905",
"0.6713837",
"0.6685503",
"0.6615061",
"0.66007936",
"0.65657735",
"0.65587157",
"0.65497774",
"0.65066075",
"0.6506144",
"0.6500302",
"0.6496758",
"0.6490471",
"0.6484794",
"0.6474204",
"0.64701754",
"0.64600194",
"0.64558667"
]
| 0.74292755 | 1 |
Tell the RoboVac to use the maximum possible fan speed. | def use_max_speed(self):
command = _build_robovac_command(RobovacModes.SET_SPEED, RobovacCommands.FAST_SPEED)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_max_speed(self, value):\n if self.mot_type == 'ims':\n return self.put_par(\"max_speed\",value)\n elif self.mot_type == 'xps8p':\n print \"asked to set the max speed to %f but max speed is read only for %s motors\\n\" % (value,mot_type)\n else:\n return self.put_par(\"max_speed\",value)",
"def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)",
"def set_max_speed(self):\n self._dll.JLINKARM_SetMaxSpeed()\n return None",
"def init_max_speed(self, speed):\n self.max_speed = speed",
"def max_speed(self) -> float:\n return 2",
"def set_machine_fan(self, val):\n if val > self.max_fan or val < 0:\n logger.error(\"/dev/ttyUSB{0} tried setting Fan speed {1}%\"\n .format(self.dev_id, val)\n )\n return False\n try:\n self._write(chr(self.incoming_fan))\n sleep(0.5)\n self._write(chr(val))\n return True\n except:\n logger.error(\"Cannot set Fan speed for /dev/ttyUSB{0}\"\n .format(self.dev_id)\n )\n return False",
"def set_fan_speed(self, value):\n self.parent.fancoolers.set_speed(value)",
"async def async_set_speed(self, value) -> None:\n await self.write_attributes_safe({\"fan_mode\": value})",
"def speed_count(self) -> int:\n return self._device.fan_speed_max",
"def max_speed(self) -> int:\n return self._max_speed",
"def max_front_wheel_speed():",
"def speed_max(self):\n return self._speed_max",
"def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)",
"def limit_speed(speed):\n if speed > 900:\n speed = 900\n elif speed < -900:\n speed = -900\n return -speed",
"def use_normal_speed(self):\n command = _build_robovac_command(RobovacModes.SET_SPEED, RobovacCommands.SLOW_SPEED)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def setspeed(speed):\n if speed is None:\n click.echo(\"speed value is required\")\n raise click.Abort()\n\n for fan in range(_wrapper_get_num_fans()):\n status = _wrapper_set_fan_speed(fan, speed)\n if not status:\n click.echo(\"Failed\")\n sys.exit(1)\n\n click.echo(\"Successful\")",
"def get_fan_speed(self):\n return self.__fan_speed",
"def limit_speed(speed):\n if speed > 1000:\n speed = 1000\n elif speed < -1000:\n speed = -1000\n return speed",
"def set_speed():\n pass",
"def set_limit_max():\n limit_max = request.params.get(\"limit_max\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_LimitMax(output, ctypes.c_float(limit_max))\n if retval != 0:\n LOG.error(\"Failed to set maximum output voltage. Error code: %s\", ERROR_CODES[retval])",
"def servo_set_speed_limit(ch, accel):\n\n # Check to make sure speed is in range\n speed = max(accel, accel_limit_min)\n speed = min(accel, accel_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_accel, ch, accel)",
"def get_max_speed(self):\n if self.mot_type == 'ims':\n return self.get_par(\"max_speed\")\n elif self.mot_type == 'xps8p':\n return self.get_par(\"max_speed_xps\")\n else:\n return self.get_par(\"max_speed\")",
"def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]",
"def set_value(self, on_level):\n if on_level in FanSpeedRange.OFF:\n fan_speed = FanSpeed.OFF\n elif on_level in FanSpeedRange.LOW:\n fan_speed = FanSpeed.LOW\n elif on_level in FanSpeedRange.MEDIUM:\n fan_speed = FanSpeed.MEDIUM\n else:\n fan_speed = FanSpeed.HIGH\n self.value = fan_speed",
"def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)",
"def max_voltage_limit(self, value):\n self._write(MX_MAX_VOLTAGE_LIMIT, value)",
"def fan_speed(self) -> Optional[str]:\n return self._fan_speed",
"def constrainSpeed(self, speedRPM):\n\n if speedRPM > self.motorMaxRPM:\n speedRPM = self.motorMaxRPM\n\n if speedRPM < 0.0:\n speedRPM = 0.0\n\n# print ( \"motorSpeed RPM adjusted: \", speedRPM )\n\n return speedRPM",
"def set_fan_mode(self, fan_mode):\n self.api.device_control(self.obj_id, \"windSpeedSet\", {\"value\": fan_mode})",
"def amp_max_hspeed(self, index):\n hspeed = ct.c_float()\n self.lib.GetAmpMaxSpeed(ct.c_int(index), ct.pointer(hspeed))\n return hspeed.value"
]
| [
"0.74900687",
"0.7249712",
"0.7098766",
"0.7085992",
"0.7081822",
"0.70329684",
"0.7032855",
"0.698288",
"0.6789867",
"0.6753174",
"0.674114",
"0.6709674",
"0.6582785",
"0.65548813",
"0.65360487",
"0.65114",
"0.6508013",
"0.647743",
"0.63822633",
"0.6367871",
"0.6339305",
"0.6302794",
"0.62981915",
"0.6271221",
"0.6239672",
"0.62318194",
"0.6217415",
"0.61810696",
"0.6177162",
"0.61492926"
]
| 0.797232 | 0 |
Tell the RoboVac to move forward without vacuuming. | def go_forward(self):
command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_forward():\n pass",
"def forward( self ):\n self._has_change = True\n print( \"Forward\" )",
"def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")",
"def moveForward(self):\n if self.onGround:\n self.vx = 4",
"def go_backward(self):\n command = _build_robovac_command(RobovacModes.GO_BACKWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def forward(self):\n self.position += 1",
"def moveBackward(self):\n if self.onGround:\n self.vx = -4",
"def move_back(self):\n\n # slowly drive backwards\n self.velocity = -1 * const.Driving.CAUTIOUS_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n # drive as long there is enough space to the next vehicle or obstacle\n gap = self.formation.calc_gap()\n self.start_driving()\n while self.sensor_manager.rear > gap: continue\n\n self.stop_driving()",
"def _move_forward(enemy):\n\t\tBoard.board[enemy.x][enemy.y]=' '\n\t\tenemy.y += Enemy.vel \n\t\tif((enemy.x,(enemy.y)-Enemy.vel) in Board.triangle):\n\t\t\tBoard.board[enemy.x][(enemy.y)-Enemy.vel]='.'\n\n\t\tif(Board.board[enemy.x][enemy.y]=='M'):\n\t\t\tMario.lives -= 1\n\t\t\tif Mario.lives<=0:\n\t\t\t\treturn \"exit\"\n\t\t\tos.system('clear')\n\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\t\\tNumber of Mario left:\",Mario.lives)\n\t\t\tMario.respawn(enemy.x,enemy.y)\n\t\t\ttime.sleep(2)\n\n\t\tBoard.board[enemy.x][enemy.y]='@'",
"def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)",
"def move_forward(self):\n self.x, self.y = self.compute_positions()",
"def go_left(self):\n command = _build_robovac_command(RobovacModes.GO_LEFT, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def step_forward(self):",
"def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()",
"def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)",
"def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)",
"def _move_forward(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tif(self.y<=798):\n\t\t\tself.y = self.y+1\n\t\t\tif Board.board[self.x][self.y]=='0':\n\t\t\t\tMario.score += 1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='P':\n\t\t\t\tMario.lives+=1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_1-up.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='A':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tMario.attack = 1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_powerup.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='@':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tMario.lives-=1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_mariodie.wav\"])\n\t\t\t\tif Mario.lives<=0:\n\t\t\t\t\tcall([\"aplay\",\"-q\",\"smb_gameover.wav\"])\n\t\t\t\t\treturn \"exit\"\n\t\t\t\tos.system('clear')\n\t\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\t\\tNumber of Mario left\",Mario.lives)\n\t\t\t\tMario.respawn(self.x,self.y)\n\t\t\t\ttime.sleep(2)\n\t\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\n\t\t\telif(Board.board[self.x][self.y]=='/'):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x-1][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='I':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tBoard.bonus_round()\n\n\t\t\telif Board.board[self.x][self.y]=='K':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tenemy.boss_round()\n\n\t\t\telif(Board.board[self.x][self.y] in obstacles):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y-1]='M'\n\n\t\t\telif((Board.board[self.x+1][self.y-1]=='/' or Board.board[self.x+1][self.y-1]=='T') and Board.board[self.x+1][self.y]==' '):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y+1]='M'\n\t\t\t\tMario.go_down(self)\n\t\t\telse:\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\tif( self.y-1 >= ((Board.prev_j+Board.prev_k)/2) ):\n\t\t\tos.system('clear')\n\t\t\tBoard.prev_j += 1 \n\t\t\tBoard.prev_k += 1\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\telse:\n\t\t\tos.system('clear')\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)",
"def forward(self):\n self.cursor.forward()",
"def go(self):\n global Moving\n\n if TargetVal > ActualVal:\n Moving = 'bak'\n elif TargetVal < ActualVal:\n Moving = 'fwd'\n\n MoveMotor()",
"def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')",
"def move(self):\n pass",
"def move(self, direction, cycles):\n\t\tpass",
"def move_up(self):\n\n # slowly drive backwards\n self.velocity = const.Driving.MAX_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n # drive as long there is enough space to the next vehicle or obstacle\n gap = self.formation.calc_gap()\n self.start_driving()\n while self.sensor_manager.front > gap: continue\n\n self.stop_driving()",
"def skip_forward(self) -> None:\n self.keyboard.press(Key.right)\n self.keyboard.release(Key.right)",
"def move_backward():\n pass",
"def AeroMove(self, pos):\r\n\r\n pass",
"def advance(self):\r\n #if see if the UFO is almost at the edge of the screen\r\n if (self.center.x >= SCREEN_WIDTH-20 or self.center.y >= SCREEN_HEIGHT-20):\r\n #if it is change the velocity to negative to reverse direction\r\n self.velocity.dx *= -2\r\n self.velocity.dy *= -2\r\n \r\n # set x equal to x plus dx\r\n self.center.x += self.velocity.dx\r\n # set y equal to y plus dy\r\n self.center.y += self.velocity.dy\r\n #draw the flying object at its new point.\r\n self.draw()",
"def up(self):\n self.forward(MOVE_DISTANCE)",
"def forward(self):\n raise NotImplemented",
"def forward(self):\n raise NotImplemented"
]
| [
"0.69637424",
"0.690487",
"0.6864352",
"0.6808601",
"0.6727196",
"0.6538748",
"0.65017474",
"0.64020467",
"0.63450474",
"0.6310254",
"0.6196319",
"0.6166948",
"0.6153368",
"0.61513495",
"0.6137633",
"0.61126256",
"0.6086981",
"0.60860014",
"0.60540205",
"0.6053337",
"0.6043093",
"0.59864295",
"0.5969268",
"0.5947248",
"0.5945844",
"0.59414816",
"0.593717",
"0.5911101",
"0.59086955",
"0.59086955"
]
| 0.7456065 | 0 |
Tell the RoboVac to move backward without vacuuming. | def go_backward(self):
command = _build_robovac_command(RobovacModes.GO_BACKWARD, RobovacCommands.MOVE)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_back(self):\n\n # slowly drive backwards\n self.velocity = -1 * const.Driving.CAUTIOUS_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n # drive as long there is enough space to the next vehicle or obstacle\n gap = self.formation.calc_gap()\n self.start_driving()\n while self.sensor_manager.rear > gap: continue\n\n self.stop_driving()",
"def moveBackward(self):\n if self.onGround:\n self.vx = -4",
"def back(self):\n self.position -= 1",
"def drive_backward(self):\n\n print(f\"{self.make.title()} driving backward.\")",
"def move_backward():\n pass",
"def move_backward(self, dist):\r\n self.send_command_without_response(f'back {dist}')",
"def backward(self, speed):\n self.controller.reverse(speed)",
"def back(self, step):\r\n self.forward(-step)",
"def backward(self):\n raise NotImplementedError",
"def right_backward(self):\n self.right_motor.run_forever(speed_sp=-self.MAX_SPEED)",
"def backward(self, duration):\n self.set_motor(self.left_motor, 'right', 0.5)\n self.set_motor(self.right_motor, 'left', 0.5)\n time.sleep(duration)",
"def move_backward(self, distance):\r\n return self.move('back', distance)",
"def _move_backward(enemy):\n\t\tBoard.board[enemy.x][enemy.y]=' '\n\t\tenemy.y -= Enemy.vel\n\t\tif((enemy.x,(enemy.y)+Enemy.vel) in Board.triangle):\n\t\t\tBoard.board[enemy.x][(enemy.y)+Enemy.vel]='.'\n\n\t\tif(Board.board[enemy.x][enemy.y]=='M'):\n\t\t\tMario.lives -= 1\n\t\t\tif Mario.lives<=0:\n\t\t\t\treturn \"exit\"\n\t\t\tos.system('clear')\n\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\tNumber of Mario left:\",Mario.lives)\n\t\t\tMario.respawn(enemy.x,enemy.y)\t\t\n\t\t\ttime.sleep(2)\n\n\t\tBoard.board[enemy.x][enemy.y]='@'",
"def backward(self):\n #print('backward\\r')\n self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)",
"def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")",
"def down(self):\n self.move(0,-1)",
"def backward(self):\n raise NotImplemented",
"def backward(self):\n raise NotImplemented",
"def backward(self):\n raise NotImplemented",
"def back( self ):\n self._has_change = True\n print( \"Back\" )",
"def backwards(self):\n pass",
"def backward(self, top, propagate_down, bottom):\r\n pass",
"def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]",
"def backward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(-1 * param * .3048)\n\t\telse:\n\t\t\tself.linear_move(-1 * riu.default_dist * .3048)",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass"
]
| [
"0.7490403",
"0.7196898",
"0.7087098",
"0.70502883",
"0.69173604",
"0.6865075",
"0.6826124",
"0.68211675",
"0.6742248",
"0.6675336",
"0.6658164",
"0.665498",
"0.66415536",
"0.66358215",
"0.6597827",
"0.6591449",
"0.6572632",
"0.6572632",
"0.6572632",
"0.65611684",
"0.6511425",
"0.6499218",
"0.6494749",
"0.64920133",
"0.6476883",
"0.6476883",
"0.6476883",
"0.6476883",
"0.6476883",
"0.6476883"
]
| 0.8243355 | 0 |
Tell the RoboVac to turn left without vacuuming. | def go_left(self):
command = _build_robovac_command(RobovacModes.GO_LEFT, RobovacCommands.MOVE)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def turn_left(self):\n pass",
"def turn_left(self):\n temp = self.direction[0]\n self.direction[0] = self.direction[1]\n self.direction[1] = -temp",
"def turn_left(): #py:turn_left\n RUR._turn_left_()",
"def do_left_turn(robot_name):\n global current_direction_index\n\n current_direction_index -= 1\n if current_direction_index < 0:\n current_direction_index = 3\n\n return True, ' > '+robot_name+' turned left.'",
"def turn_left(self): #py:UR.turn_left\n RUR._UR.turn_left_(self.body)",
"def left(self):\n self.counterUp(teamNumber = 1)",
"def turn_left(self):\n\t\tself.direction = (self.direction - 1)%4",
"def go_left(self):\n self.change_x = -6\n self.direction = \"L\"",
"def turn_left(self):\n self.direction_mod_offset -= 1\n self.calculate_offset_mapping()\n direction_num = self.direction_mod_offset % len(self.direction_arr)\n client.rotateToYawAsync(direction_num * 90).join()",
"def leftTurn(self):\n #print('leftTurn\\r')\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=1.0)",
"def goLeft(self, seconds):\n self.change_x = -5",
"def turn_left(self):\n self.facing_direction -= self.config\n if self.facing_direction < 0:\n self.facing_direction += 8\n self.x, self.y = self.compute_positions()",
"def turn_left(self):\n turn = self.__heading + Ship.TURN\n if turn >= Ship.MAX_HEADING:\n turn -= Ship.MAX_HEADING\n self.__heading = turn",
"def goLeft(self):\n check = self.rover.moveLeft()\n if check == True:\n self._checkPortal(self.getRoverLocation())",
"def go_left(self):\n self.change_x = -6",
"def go_left(self):\n self.change_x = -6",
"def GET_left(self):\n self.roomba.TurnInPlace(pyrobot.VELOCITY_SLOW, 'ccw')\n time.sleep(0.5)\n self.roomba.Stop()",
"def move_left(self):\n if self.change_valid(dx=-1):\n self.x -= 1",
"def left(self):\n self.move(-1, 0)",
"def turn_left(self, duration):\n self.set_motor(self.left_motor, 'right', 0.5)\n self.set_motor(self.right_motor, 'right', 0.5)\n time.sleep(duration)",
"def go_left(self):\n self.rect.centerx -= self.__dx",
"def start_turn_left(self, rate=RATE):\n action = StartTurnLeft(rate=rate)\n self._velocity_control_client(pickle.dumps(action))",
"def rotate_left(self):\n if self.change_valid(dr=-1):\n self.rotate = (self.rotate-1)%4",
"def changeLaneLeft(self, speed, accel):\n self.changeLane(speed, accel, -44.5)",
"def left(self):\r\n if self.d in direction_tuple:\r\n index = direction_tuple.index(self.d)\r\n if index == 0:\r\n self.d = direction_tuple[3]\r\n else:\r\n self.d = direction_tuple[index - 1]\r\n else:\r\n print(\"NO VALID ROBOT POSITION\")",
"def move_left(self):\r\n self._time += 1\r\n if self._position > 0:\r\n self._position -= 1\r\n return True\r\n else:\r\n return False",
"def move_left(self):\n self._time += 1\n if self._position > 0:\n self._position -= 1\n return True\n else:\n return False",
"def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)",
"def move_left(self):\r\n if self.rect.left > 0:\r\n self.rect.left -= self.speed",
"def go_left(self):\n self.rect.centerx -= 9"
]
| [
"0.7898152",
"0.7493305",
"0.7311259",
"0.70578676",
"0.7055431",
"0.70288837",
"0.70254827",
"0.69394284",
"0.692184",
"0.6902282",
"0.68978935",
"0.6850267",
"0.68196297",
"0.68056685",
"0.6805268",
"0.6805268",
"0.6784195",
"0.662146",
"0.6527624",
"0.64925176",
"0.64281565",
"0.63973206",
"0.6374832",
"0.6372964",
"0.63637066",
"0.6302037",
"0.63000107",
"0.62435114",
"0.6233461",
"0.6224296"
]
| 0.78428185 | 1 |
Tell the RoboVac to turn right without vacuuming. | def go_right(self):
command = _build_robovac_command(RobovacModes.GO_RIGHT, RobovacCommands.MOVE)
message = self._build_command_user_data_message(command)
self._send_packet(message, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def turn_right(self):\n pass",
"def turn_right(self):\n temp = self.direction[0]\n self.direction[0] = -self.direction[1]\n self.direction[1] = temp",
"def rightTurn(self):\n #print('rightTurn\\r')\n #self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n #self.angularVector = Vector3(x=0.0, y=0.0, z=-1.0)\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=-1.0)",
"def do_right_turn(robot_name):\n global current_direction_index\n\n current_direction_index += 1\n if current_direction_index > 3:\n current_direction_index = 0\n\n return True, ' > '+robot_name+' turned right.'",
"def turn_right(self):\n self.direction_mod_offset += 1\n self.calculate_offset_mapping()\n direction_num = self.direction_mod_offset % len(self.direction_arr)\n client.rotateToYawAsync(direction_num * 90).join()",
"def goRight(self):\n check = self.rover.moveRight()\n if check == True:\n self._checkPortal(self.getRoverLocation())",
"def go_right(self):\n self.change_x = 6\n self.direction = \"R\"",
"def turn_right(self):\n self.facing_direction += self.config\n if self.facing_direction > 7:\n self.facing_direction -= 8\n self.x, self.y = self.compute_positions()",
"def GET_right(self):\n self.roomba.TurnInPlace(pyrobot.VELOCITY_SLOW, 'cw')\n time.sleep(0.5)\n self.roomba.Stop()",
"def right(self):\n self.counterUp(teamNumber = 2)",
"def turn_right(self):\n turn = self.__heading - Ship.TURN\n if turn < Ship.MIN_HEADING:\n turn += Ship.MAX_HEADING\n self.__heading = turn",
"def turn_left(self):\n pass",
"def relax(self):\n rospy.loginfo('Now Arm will be relax')\n self.go_with2([0, 0, 0, 0, 0])",
"def turn(self):\n pass",
"def right_twist(self):\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()",
"def turn_left(): #py:turn_left\n RUR._turn_left_()",
"def turn_ship_right(self):\n self.degrees -= movement",
"def turn_right(self, duration):\n self.set_motor(self.left_motor, 'left', 0.5)\n self.set_motor(self.right_motor, 'left', 0.5)\n time.sleep(duration)",
"def goRight(self, seconds):\n self.change_x = 5",
"def right(self):\n self.move(1,0)",
"def stopRotatingRight(self,event):\n self.isRotatingRight=False",
"def turn_left(self):\n temp = self.direction[0]\n self.direction[0] = self.direction[1]\n self.direction[1] = -temp",
"def right():\n Robot.rotate(\"RIGHT\")",
"def start_turn_right(self, rate=RATE):\n action = StartTurnRight(rate=rate)\n self._velocity_control_client(pickle.dumps(action))",
"def turnRight(ev3):\n ev3.set_angle(\"A\", \"30\", \"90\")\n ev3.set_angle(\"B\", \"-30\", \"-90\")\n ev3.set_angle(\"C\", \"30\", \"90\")",
"def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)",
"def change_player_turn(self):\r\n self._player_turn *= -1",
"def right_turn(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)",
"def ReflexVacuumAgent():\n def program(percept):\n location, status = percept\n if status == 'Dirty':\n return 'Suck'\n elif location == loc_A:\n return 'Right'\n elif location == loc_B:\n return 'Left'\n return Agent(program)",
"def turn_right(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(1 + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(1)"
]
| [
"0.74713826",
"0.6848282",
"0.663373",
"0.6517492",
"0.63592947",
"0.6337446",
"0.6301318",
"0.62907267",
"0.62155974",
"0.614369",
"0.6089513",
"0.6063729",
"0.6042315",
"0.6024011",
"0.6018741",
"0.601695",
"0.6016896",
"0.599335",
"0.59697616",
"0.5962759",
"0.59334576",
"0.59245807",
"0.59207886",
"0.5912106",
"0.5902941",
"0.5900574",
"0.58982676",
"0.5859998",
"0.58489025",
"0.58188146"
]
| 0.71917444 | 1 |
Send a packet to the RoboVac. This method handles all the required encryption. Will attempt to reconnect to the RoboVac if sending a packet fails. | def _send_packet(self,
packet: LocalServerInfo_pb2.LocalServerMessage,
receive: True) -> Union[None, LocalServerInfo_pb2.LocalServerMessage]:
raw_packet_data = packet.SerializeToString()
encrypted_packet_data = _encrypt(raw_packet_data)
try:
self.s.send(encrypted_packet_data)
except Exception as e:
logging.exception(e)
self.disconnect()
self.connect()
self.s.send(encrypted_packet_data )
if not receive:
return None
response_from_robovac = self.s.recv(1024)
decrypted_response = _decrypt(response_from_robovac)
return Robovac._parse_local_server_message_from_decrypted_response(decrypted_response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _send_packet(self, packet: bytes):\n self._transport.sendto(packet, self._caddr)",
"def send_packet(self, pk):\n try:\n self.out_queue.put(pk, True, 2)\n except queue.Full:\n if self.link_error_callback:\n self.link_error_callback('RadioDriver: Could not send packet'\n ' to copter')",
"def send(self, packet):\n self._serport.send(packet + \"\\r\")",
"def send_packet(sender, payload):\n sender.write(payload)",
"def send_packet(sender, payload):\n\n sender.write(payload)",
"def send(self, byte: bytes): # 发送TCP数据,将string中的数据发送到连接的套接字。返回值是要发送的字节数量,该数量可能小于string的字节大小。\n # assert self._send_to, \"Connection not established yet. Use sendto instead.\"\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n if self.isConnected:\n self.pktTime.clear() # 初始化发包时间\n pieces_size = 100\n datas = self.slice_into_pieces(byte, pieces_size) # 将包切片\n print('send:need to send {} pkts'.format(len(datas)))\n sw = SendingWindow(window_size=10, datas=datas, sender_time_out_method=self.sender_time_out) # 初始化发送窗口\n ack_finish = False\n for seq, seg in sw.buffer.items(): # 将窗口内的包发送\n print(\"send:send pkt \", seg.seqNumber)\n # time.sleep(self.RTT)\n self.sendto(seg.getSegment(), self.connectAddr)\n self.pktTime[seq] = time.time()\n\n while ack_finish is False: # 开始发送\n\n buffer, addr = self.recvfrom(1024) # 接受ack信息\n\n # head = buffer[:18]\n seg = segment.parse(buffer)\n\n #如果现在检测到对方在发,那么久改为recv模式\n if seg.payload!=b'':\n\n #TODO: 保守之策\n data = self.recv(2048)\n return\n\n if segment.Checksum(seg) is False:\n print('send: Check sum false')\n continue\n\n if seg.ackNumber in sw.buffer.keys():\n print(f'send: seg ack number {seg.ackNumber} in sw keys {sw.buffer.keys()} and payload is {str(seg.payload)}')\n con = sw.ack(seg.ackNumber) # 通知发送窗口接收到了包并且返回结果\n error = time.time() - self.pktTime[seg.ackNumber]\n self.RTT = self.RTT * (1 - self.rttRate) + self.rttRate * error\n sw.time_out = self.RTT\n\n if type(con) == list: # 返回结果:链表,链表中是滑动窗口后新加入的包,将其一一发送\n # print('sender: start to slide send window')\n for segg in con:\n # TODO:ADD TIME OUT\n # time.sleep(self.RTT)\n print(\"send:send pkt \", segg.seqNumber)\n self.sendto(segg.getSegment(), self.connectAddr)\n self.pktTime[segg.seqNumber] = time.time()\n\n elif con==True: # 返回结果:真,说明发送完毕\n ack_finish = True\n # print('sender: send finish')\n else:\n print(f'send: seg ack number {seg.ackNumber} not in sw keys {sw.buffer.keys()}')\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################",
"def _send(self, data):\n self._sock.send(self._cipher_tx.crypt(data))",
"def SendPacketToReceiver(self, packet):\n self.network_simulator.SendToReceiver(packet)",
"def true_send(conn, data):\n encrypted_data = key.encrypt(pickle.dumps(data))\n length = str(len(encrypted_data)).zfill(LENGTH).encode()\n data = length + encrypted_data\n conn.send(data)",
"def send(\n self,\n socket: socket.socket,\n content_type: ContentType,\n data: bytes,\n transaction: int = 0,\n status: int = 0,\n ) -> None:\n body = pack(\">3I\", self.VERSION, transaction, content_type) + data\n encrypted = self.enc.encrypt(body)\n length = 4 + len(encrypted)\n data = pack(\">2I\", length, status) + encrypted\n logger.debug(\"Sending %d bytes (total %d bytes)\" % (length, len(data)))\n socket.sendall(data)",
"def send_packet(self, p: str):\n\n self._send(p, self.socket)",
"def send(self, bytes:bytes):\n assert self._send_to, \"Connection not established yet. Use sendto instead.\"\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n raise NotImplementedError()\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################",
"def send_packet(self, pkt):\n if isinstance(pkt, packet.AuthPacket):\n if pkt.auth_type == 'eap-md5':\n # Creating EAP-Identity\n password = pkt[2][0] if 2 in pkt else pkt[1][0]\n pkt[79] = [struct.pack('!BBHB%ds' % len(password),\n EAP_CODE_RESPONSE,\n packet.current_id,\n len(password) + 5,\n EAP_TYPE_IDENTITY,\n password)]\n reply = self._send_packet(pkt, self.authport)\n if (\n reply\n and reply.code == PacketCode.ACCESS_CHALLENGE\n and pkt.auth_type == 'eap-md5'\n ):\n # Got an Access-Challenge\n eap_code, eap_id, eap_size, eap_type, eap_md5 = struct.unpack(\n '!BBHB%ds' % (len(reply[79][0]) - 5), reply[79][0]\n )\n # Sending back an EAP-Type-MD5-Challenge\n # Thank god for http://www.secdev.org/python/eapy.py\n client_pw = pkt[2][0] if 2 in pkt else pkt[1][0]\n md5_challenge = hashlib.md5(\n struct.pack('!B', eap_id) + client_pw + eap_md5[1:]\n ).digest()\n pkt[79] = [\n struct.pack('!BBHBB', 2, eap_id, len(md5_challenge) + 6,\n 4, len(md5_challenge)) + md5_challenge\n ]\n # Copy over Challenge-State\n pkt[24] = reply[24]\n reply = self._send_packet(pkt, self.authport)\n return reply\n elif isinstance(pkt, packet.CoAPacket):\n return self._send_packet(pkt, self.coaport)\n else:\n return self._send_packet(pkt, self.acctport)",
"def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)",
"def send_packet(self, msg_type='change'):\n packet = self.handler.prepare_packet()\n log.debug(\"Preparing to send packet for {name}\", name=self.name)\n if packet and packet != {}:\n log.debug(\"Sending packet: {packet} to: {name}\", packet=packet, name=self.name)\n self.connection.send({'type': msg_type, 'packet': packet})",
"def send(self, source_socket):\n\n source_socket.send_packet(self.packet.packet)",
"def sendPacket(self, pack):\n\t\t\n\t\tself.sendCall(pack)",
"def sendpkt(self, data, retries=10): \n wire_data = self.pack(data).encode()\n self.logger.debug('sending> %s', data) \n self.s.send(wire_data)\n res = self.rxqueue.get()\n while res != '+':\n self.s.send(wire_data)\n res = self.rxqueue.get()\n retries -= 1\n if retries == 0:\n raise ValueError(\"retry fail\")",
"def send(self, data:bytes):\n packet = Rudp.Packet(self.seq, 0, data)\n packet.timesamp = time()\n self.sendPacket(packet)\n self.seqPlusOne()\n return(packet)",
"def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()",
"def send_packet(self, raw_packet):\n\n if self.verbose:\n print(\"< %s\" % \" \".join(\"%02x\" % i for i in raw_packet))\n\n # Send the data to the device.\n self.ftdi.write(self.ftdi.INTERFACE_A, raw_packet, async_=False)",
"def writer(self):\n #while self.alive:\n try:\n icmpreq = ethernet.Ethernet(src_s=\"dc:a6:32:00:a7:8b\", dst_s=\"ec:84:b4:3e:c8:20\", type=ethernet.ETH_TYPE_IP) +\\\n ip.IP(p=ip.IP_PROTO_ICMP, src_s=\"192.168.1.35\", dst_s=\"172.217.166.110\") +\\\n icmp.ICMP(type=8) +\\\n icmp.ICMP.Echo(id=1, ts=123456789, body_bytes=b\"12345678901234567890\")\n self.serial.write(icmpreq.bin()+b'~')\n except socket.error as msg:\n print(msg)\n self.stop()",
"def __send_to_socket(self, sock, data):\n # encrypt the data\n encrypted = Commands.encrypt(data)\n # get the data's length\n data_len = Commands.pad_length(len(encrypted))\n # send the whole message - length and then the data itself encrypted\n sock.send(data_len + encrypted)\n # if DEBUG MODE on then print the data we sent\n if self.__DEBUG:\n address = self.__get_address_by_socket(sock)\n print >> sys.__stdout__, \"Sent to <%s : %s> the following command:\\n%s\" % (address[0], address[1], data)\n # return true\n return True",
"def send(self, packet):\n self._loop.create_task(self.send_coro(packet))",
"async def _send_if_possible(self, packet_type: str, data: Any,) -> None:\n\n try:\n await self.send(packet_type, data, await_reply=False)\n except IncorrectStateException:\n logger.debug(\"Could not send (disconnecting or already disconnected)\")",
"def sendData(packet: FrameStruct, repeats: int) -> NoReturn:\n ftype = b'\\x08\\x00'\n dur = b'\\x00\\x00'\n # random hex stream, could be used as additional space of bits\n src = b'\\x08\\x00\\x27\\x8e\\x75\\x44'\n # broadcast address is used to stop certain drivers retransmitting frames\n dst = b'\\xff\\xff\\xff\\xff\\xff\\xff'\n bssid = src\n # semi unique id, annoyingly not usable due to lack of bits for this appli\n sn = (random.randint(0, 4096))\n sn = sn << 4\n seq = sn.to_bytes(4, 'little')\n\n # generate 80211 header\n header80211 = ftype + dur + dst + src + bssid + seq\n\n # combine header with other data to create valid frame\n data = globaldat.RADIO_TAP + header80211 + b\"\\x72\\x6f\\x62\\x6f\\x74\" + \\\n packet # attach radiotap headers, 80211 headers and yodel payload\n #globaldat.bytesPrint(data)\n #print(repeats)\n for i in range(repeats): # re-transmmit message a couple times\n globaldat.yodelSocket.send(data) # send the data",
"def send_packet(self,packet):\n dbgprint('send_packet()')\n self.usb.write('\\x00'+packet)\n self.usb.flush()\n n=ord(self.usb.read(1))\n if n&0x80: raise RuntimeError(self.usb.read(n&0x3f))\n return self.usb.read(n) if n else None",
"def send(self, packet_type=None, **kwargs):\n p = Packet(type=packet_type, payload=kwargs, handler=self.handler)\n self.sock.send(bytes(p))",
"def send(self, data):\n print \"Attempting to send packet of size %d to %s\" % (len(data), self.hostname)\n self.sock.sendto(data, (self.dst_ip, 0))",
"def _send_packet_safe(self, cr, packet):\n # packet = bytearray(packet)\n packet[0] &= 0xF3\n packet[0] |= self._curr_up << 3 | self._curr_down << 2\n resp = cr.send_packet(packet)\n if resp and resp.ack and len(resp.data) and \\\n (resp.data[0] & 0x04) == (self._curr_down << 2):\n self._curr_down = 1 - self._curr_down\n if resp and resp.ack:\n self._curr_up = 1 - self._curr_up\n\n return resp"
]
| [
"0.64176506",
"0.63706136",
"0.6224615",
"0.6196162",
"0.6173141",
"0.6123173",
"0.6081629",
"0.6043275",
"0.5993913",
"0.59894913",
"0.59562594",
"0.59385705",
"0.58800924",
"0.58748245",
"0.5874205",
"0.58084327",
"0.5799395",
"0.57993656",
"0.57781637",
"0.5766678",
"0.5758829",
"0.57315075",
"0.5673605",
"0.5669687",
"0.5656138",
"0.5648627",
"0.56464124",
"0.5636745",
"0.5630764",
"0.56250155"
]
| 0.6370769 | 1 |
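The positive document above captures a send-with-retry pattern over an encrypted socket: serialize, encrypt, send, and reconnect exactly once if the send fails. Below is a minimal self-contained sketch of that pattern; the PacketSender class, its host/port constructor, and the placeholder _encrypt/_decrypt helpers are illustrative assumptions, not the RoboVac client's real API (which works with protobuf messages and its own cipher).

import logging
import socket
from typing import Optional


def _encrypt(data: bytes) -> bytes:
    # Placeholder cipher; the real client applies its own encryption here (assumed).
    return data


def _decrypt(data: bytes) -> bytes:
    # Placeholder inverse of _encrypt (assumed).
    return data


class PacketSender:
    """Sketch of the send / reconnect-once / resend flow shown above."""

    def __init__(self, host: str, port: int) -> None:
        self.host = host
        self.port = port
        self.sock: Optional[socket.socket] = None

    def connect(self) -> None:
        self.sock = socket.create_connection((self.host, self.port), timeout=5)

    def disconnect(self) -> None:
        if self.sock is not None:
            self.sock.close()
            self.sock = None

    def send_packet(self, payload: bytes, receive: bool = True) -> Optional[bytes]:
        encrypted = _encrypt(payload)
        try:
            self.sock.send(encrypted)
        except OSError as exc:
            # One reconnect-and-retry attempt, mirroring the snippet above.
            logging.exception(exc)
            self.disconnect()
            self.connect()
            self.sock.send(encrypted)
        if not receive:
            return None
        return _decrypt(self.sock.recv(1024))

Usage follows the same shape as the source: sender = PacketSender(host, port); sender.connect(); sender.send_packet(b"...", receive=False).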
Check if token is revoked | def is_revoked(self, token: str) -> bool:
return token in self.revoked_tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_token_is_revoked(self, jti: str) -> None:\n redis = self._conn_redis()\n entry = redis.get(jti)\n if entry and entry == 'true':\n raise HTTPException(status_code=401,detail=\"Token has been revoked\")",
"def is_token_revoked(decoded_token):\n jti = decoded_token['jti']\n token = BlacklistedToken.query.filter_by(jti=jti).first()\n return token is not None",
"def is_token_revoked(decoded_token):\r\n jti = decoded_token['jti']\r\n try:\r\n token = TokenBlacklist.query.filter_by(jti=jti).one()\r\n return token.revoked\r\n except NoResultFound:\r\n return True",
"def test_logout_revoked(self):\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token has been revoked', str(response.data))",
"def test_is_revoked(self):\n self.assertEqual(self.project.is_revoked(), False)",
"def deauth(request):\n\n if(request.token):\n request.token.delete()\n return JsonResponse({'message': 'Your token is revoked'}) \n else:\n return HttpResponseBadRequest('It does not make sense to revoke a token ' +\n 'if no token are supplied to the request')",
"def check_token_invalidate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/invalidate/\"\n return self._lr_object._get_json(url, payload)",
"def verify_reset_token(self, token):\n\n expired, invalid, data = self._verify_token(token)\n if data and data.get('id') == self.id and data.get('op') == 'reset':\n data = True\n else:\n data = False\n return expired, invalid, data",
"def revoke_token():\n return server.create_endpoint_response(RevocationEndpoint.ENDPOINT_NAME)",
"def revoke_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n RefreshToken.revoke(refresh_token)\n db.session.commit()\n return msg.success('Token is successfully revoked')",
"def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True",
"def verify_token(self, token):\n return False",
"def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True",
"def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()",
"async def revoke_token(self, request: Request, token: str) -> None:\n token_record = ...\n token_record.revoked = True\n token_record.save()",
"def test_revoke_refresh_token(client, tokens):\n response = client.delete(\n \"/auth/refresh-token/\",\n headers={\"Authorization\": \"Bearer {}\".format(tokens[\"refresh\"])},\n )\n\n payload = response.get_json()\n assert response.status_code == HTTPStatus.OK\n assert payload[\"msg\"] == \"Refresh token successfully revoked\"",
"def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n if token_type_hint:\n tok = self._tokengetter(**{token_type_hint: token})\n else:\n tok = self._tokengetter(access_token=token)\n if not tok:\n tok = self._tokengetter(refresh_token=token)\n\n if tok and tok.client_id == request.client.client_id:\n request.client_id = tok.client_id\n request.user = tok.user\n tok.delete()\n return True\n\n msg = 'Invalid token supplied.'\n log.debug(msg)\n request.error_message = msg\n return False",
"def is_expired(self, token: str) -> bool:\n try:\n decoded_token = jwt.decode(token, options=self._options)\n except jwt.ExpiredSignatureError: # type: ignore\n return True\n else:\n if decoded_token['exp'] - time.time() >= self.renew_buffer:\n # If the token will expire in less than cls._renew_buffer amount of time in seconds, the token is\n # considered expired.\n return True\n else:\n return False",
"def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False",
"def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False",
"def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False",
"async def revoke_token(self, token: str, audience: str) -> None:\n ts = datetime.datetime.utcnow()\n if token in self.revoked_tokens:\n return None\n exp = datetime.datetime.fromtimestamp(get_exp_from_jwt(token, audience))\n msg = {\n \"token\": token,\n \"ts\": ts.isoformat(),\n \"expired\": exp.isoformat(),\n }\n self.publish(smart_bytes(orjson.dumps(msg)), \"revokedtokens\", 0)\n while token not in self.revoked_tokens:\n async with self.revoked_cond:\n await self.revoked_cond.wait()\n e2e = (datetime.datetime.utcnow() - ts).total_seconds()\n timeout = min(max(e2e * 3, 1), 30)\n await asyncio.sleep(timeout)",
"def test_refreshes_token_when_expired(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n original_token = badgr._token_data['access_token']\n with vcr.use_cassette('tests/vcr_cassettes/expired_auth_token.yaml'):\n badgr.get_from_server(self._sample_url)\n self.assertNotEqual(original_token,\n badgr._token_data['access_token'])",
"def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True",
"def _is_oauth_token_valid(token: dict, time_key=\"expires_on\") -> bool:\n if \"access_token\" not in token or token.get(\"token_type\", \"\") != \"Bearer\" or time_key not in token:\n raise AirflowException(f\"Can't get necessary data from OAuth token: {token}\")\n\n return int(token[time_key]) > (int(time.time()) + TOKEN_REFRESH_LEAD_TIME)",
"def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))",
"def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)",
"def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time",
"def expired(token):\n token = session.query(PasswordRecoveryToken)\\\n .filter(PasswordRecoveryToken.token == token)\\\n .first()\n return token.expiration < datetime.now()",
"async def check_token_works(self) -> bool:\n async with self.web_session.get(url=self._user_endpoint, headers=self._headers) as resp:\n self._expired_token = not resp.status == 200\n return not self._expired_token"
]
| [
"0.85325533",
"0.83196324",
"0.81468725",
"0.73686993",
"0.6955454",
"0.69062203",
"0.69048154",
"0.6787738",
"0.6749311",
"0.6746789",
"0.6666206",
"0.66284657",
"0.6612462",
"0.65676683",
"0.6503612",
"0.6446665",
"0.64310426",
"0.64228463",
"0.641565",
"0.6403295",
"0.6384113",
"0.6376415",
"0.6339788",
"0.63386244",
"0.63156766",
"0.6304856",
"0.63045645",
"0.6301804",
"0.6300004",
"0.62910897"
]
| 0.8320756 | 1 |
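In contrast to the database-backed blacklist checks among the negatives above, the positive document is a plain membership test against an in-memory set. A minimal sketch of such a revocation store follows; the RevocationList name and its revoke method are illustrative and not taken from the source.

class RevocationList:
    """Tracks revoked tokens in memory; persistence and expiry are out of scope."""

    def __init__(self) -> None:
        self.revoked_tokens: set = set()

    def revoke(self, token: str) -> None:
        self.revoked_tokens.add(token)

    def is_revoked(self, token: str) -> bool:
        return token in self.revoked_tokens


# Usage sketch
store = RevocationList()
store.revoke("abc123")
assert store.is_revoked("abc123")
assert not store.is_revoked("zzz999")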
Sets the edges of this tree. | def set_edges(self, edges):
assert len(edges) == self._num_edges
self._tree_grid = make_tree(edges) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_edges(self, edges):\n self._tree.set_edges(edges)\n self._program = make_propagation_program(self._tree.tree_grid)",
"def edges(self, edges):\n\n self._edges = edges",
"def edges(self, e):\n self._edges = e",
"def set_edges(self, edges):\n if (not isinstance(edges, None.__class__) and (edges.size != 0)):\n if ((np.shape(edges)[0] != self.E) or (np.shape(edges)[1] != 2)):\n raise ValueError('Incompatible size of the edge matrix')\n if edges.max() + 1 > self.V:\n raise ValueError('Incorrect edge specification')\n self.edges = edges\n else:\n self.edges = []",
"def edges(self, edges):\n if edges:\n edges = ensure_list(edges)\n for (nd_out, nd_in) in edges:\n if nd_out not in self.nodes or nd_in not in self.nodes:\n raise Exception(\n f\"edge {(nd_out, nd_in)} can't be added to the graph\"\n )\n self._edges = edges",
"def set_right_edges(self):\n for v in self:\n for e in v.edges_list:\n e.linked[0]=v\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]\n for e in self.list_of_edges:\n e.linked[0]=self[self.search_index_by_coordinates(e.linked[0].coordinates)]\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]",
"def set_max_edges(self, edges):\n self.max_edges = edges",
"def edge(self, edge: EdgeConfig):\n\n self._edge = edge",
"def ChangeEdgeSet(self, *args):\n return _BRepAlgo.BRepAlgo_DSAccess_ChangeEdgeSet(self, *args)",
"def set_incident_edges(self, incident_edges):\n self.incident_edges = set(incident_edges) # overwrite the existing set of incident edges with the input set",
"def set_incident_edges(self, incident_edges):\n self.incident_edges = set(incident_edges) # overwrite the existing set of incident edges with the input set",
"def addEdges(self, edges):\n for edge in edges:\n self.addEdge(edge[0], edge[1], edge[2])",
"def set_edgeprops(self, edgeprops):\n assert isinstance(edgeprops, dict), \"edgeprops must be a dictionary, even if empty\"\n self.edgeprops = edgeprops",
"def set_min_edges(self, edges):\n self.min_edges = edges",
"def set_edge_colors(self, edge_colors):\n\n self.edge_colors = edge_colors",
"def __saveEdges(self, edges):",
"def add_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.add_edge(i, j)",
"def setUFNumEdges(self, nedges):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n TreeLikelihoodBase.setUFNumEdges(self, nedges)",
"def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self",
"def setParent(self, edge):\n self.parent_edge = edge",
"def setup_edges(self, *, _unused=None):\n # Edge from input data\n encoder_edge = ForwardEdge('input',\n self.hyperparameter_config)\n\n # Encoder setup\n self.children[0].setup_edges(encoder_edge)\n\n # Decoder setup\n decoder_edge = ForwardEdge(self.children[0].last_descendant())\n self.children[1].setup_edges(decoder_edge)\n\n # Gene setup is now complete\n self.setup_complete = True\n\n pass",
"def __init__(self, edgelist):\n self.edge = edgelist\n if edgelist:\n self.update_node2edge()",
"def set(self, **attrs):\n self.graph._setattrs(handle=self.handle, **attrs)",
"def addEdges(self, edges: list):\n\n for cell, vertice in edges:\n self.addBow(cell, vertice)",
"def apply_edges(self, func=\"default\", edges=ALL, inplace=True):\n super(BaseGraphStore, self).apply_edges(func, edges, inplace=True)",
"def edges_list(self, edges_list):\n for e in edges_list:\n exceptions.check_pertinent_edge(self, e)\n self._edges_list = edges_list",
"def set_nodeset(self, nodeset):\n self.nodeset = set(nodeset) # overwrite the existing nodeset with the input nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not",
"def add_all_edges(self):\n for n1 in self.vertices():\n for n2 in self.vertices():\n if n1 != n2:\n self.add_edge((n1, n2))",
"def edge(self, viz_edge: VizEdge) -> None:\n self._digraph.edge(viz_edge.start, viz_edge.end)",
"def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)"
]
| [
"0.82975215",
"0.80445296",
"0.7617749",
"0.75694954",
"0.69215083",
"0.65117955",
"0.65005696",
"0.64635426",
"0.63808835",
"0.6289433",
"0.6289433",
"0.6232497",
"0.62297255",
"0.6196837",
"0.61252075",
"0.6041478",
"0.6040774",
"0.5979283",
"0.5967948",
"0.5943518",
"0.5935082",
"0.5846767",
"0.5822288",
"0.58044475",
"0.5733746",
"0.5710974",
"0.57017034",
"0.5700249",
"0.5660562",
"0.56510043"
]
| 0.8505694 | 0 |
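The set_edges document above delegates to a make_tree helper that builds the tree grid consumed by the propagation-schedule code in the next record, where each column is read as (edge index, first vertex, second vertex). The sketch below is one plausible implementation under that assumption; it is not the library's actual code, and the vertex-ordering convention is a guess.

import numpy as np


def make_tree(edges):
    """Pack (v1, v2) pairs into a 3 x E integer grid of [e, v1, v2] columns."""
    E = len(edges)
    grid = np.zeros((3, E), dtype=np.int16)
    for e, (v1, v2) in enumerate(edges):
        v1, v2 = sorted((v1, v2))  # ordering convention assumed, not confirmed
        grid[:, e] = [e, v1, v2]
    return grid


# Example: a path 0-1-2-3 has E = 3 edges and V = E + 1 = 4 vertices.
grid = make_tree([(0, 1), (1, 2), (2, 3)])
assert grid.shape == (3, 3)
assert list(grid[:, 0]) == [0, 0, 1]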
Makes an efficient schedule for message passing on a tree. | def make_propagation_schedule(grid, root=None):
if root is None:
root = find_center_of_tree(grid)
E = grid.shape[1]
V = 1 + E
neighbors = [set() for _ in range(V)]
edge_dict = {}
for e, v1, v2 in grid.T:
neighbors[v1].add(v2)
neighbors[v2].add(v1)
edge_dict[v1, v2] = e
edge_dict[v2, v1] = e
# Construct a nested schedule.
nested_schedule = []
queue = deque()
queue.append((root, None))
while queue:
v, parent = queue.popleft()
nested_schedule.append((v, parent, []))
for v2 in sorted(neighbors[v]):
if v2 != parent:
queue.append((v2, v))
for v, parent, children in nested_schedule:
for v2 in sorted(neighbors[v]):
if v2 != parent:
children.append(v2)
# Construct a flattened schedule.
schedule = np.zeros([V + E + V, 4], np.int16)
pos = 0
for v, parent, children in reversed(nested_schedule):
schedule[pos, :] = [OP_UP, v, 0, 0]
pos += 1
for child in children:
schedule[pos, :] = [OP_IN, v, child, edge_dict[v, child]]
pos += 1
schedule[pos, :] = [OP_ROOT, v, 0, 0]
pos += 1
for v, parent, children in nested_schedule[1:]:
schedule[pos, :] = [OP_OUT, v, parent, edge_dict[v, parent]]
pos += 1
assert pos == V + E + 1 + E
return schedule | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def algo_schedule():\n\talgo(node.id, node)\n\treactor.callLater(STEP_TIME, algo_schedule)",
"def run(self):\n\n # keep track of counter\n counter = 0\n\n while self.queue:\n\n # print depth of tree every 10000 steps\n if counter % 10000 == 0:\n print(len(self.queue[0]))\n\n # get first moves set from queue\n moves_set = self.get_moves_set()\n\n # move all moves from set\n self.try_moves(moves_set)\n\n # continue branch (add to queue) if layout is not in archive\n if self.not_in_archive():\n self.add_to_queue(moves_set)\n \n # check for win\n if self.won_game():\n\n # return winning set of moves\n return moves_set\n \n # reverse moves to original layout\n self.reverse_moves(moves_set)\n \n # add to counter\n counter += 1",
"def build_tree(self, rebuild=False):\n\n time_qualifier = self.process_hierarchy.bottom_process.time_qualifier\n process_name = self.process_hierarchy.bottom_process.process_name\n if rebuild or self.build_timeperiod is None:\n timeperiod = settings.settings['synergy_start_timeperiod']\n else:\n timeperiod = self.build_timeperiod\n\n timeperiod = cast_to_time_qualifier(time_qualifier, timeperiod)\n actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)\n\n while actual_timeperiod >= timeperiod:\n self.get_node(process_name, timeperiod)\n timeperiod = time_helper.increment_timeperiod(time_qualifier, timeperiod)\n\n self.build_timeperiod = actual_timeperiod",
"def __run_schedules():\n while True:\n __scheduler.run()",
"def schedule_update_all(graph, message_func, reduce_func, apply_func, outframe=...): # -> None:\n ...",
"def messageHandler_PseudoTree(self, msg):\n data = msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received message {1} from ID {2}'.format(self.CommID, data, sender))\n if self.Neighbors and self.statusTree == 0: # if node has neighbors at all and is not finished\n if data[0] == 'child' and self.Visited == 0: # if first time current node is visited\n self.OpenNeighbors = copy.deepcopy(self.Neighbors) # open all neighbors of current node...\n self.OpenNeighbors.remove(sender) # ... except for sender node\n #if not self.OpenNeighbors:\n self.Visited = 1\n self.Parent = sender # sender node is parent of current node\n self.log_message('ID {0} has parent with ID {1}'.format(self.CommID, self.Parent))\n self.OPTalg = data[1] # save alg. type\n self.fromTime = data[2] # save fromTime\n self.toTime = data[3] # and toTime\n self.OPTcriterion = data[4] # and opt. criterion\n\n elif data == 'pseudo': # if message of type PSEUDO\n self.Children.remove(sender) # remove sender from children\n self.PseudoParents.append(sender) # and at sender to pseudo parents\n self.noOfChildReplies += 1\n\n elif data[0] == 'child_ok':\n self.noOfChildReplies += 1\n\n elif data[0] == 'child' and sender in self.OpenNeighbors: # if message is of type CHILD and sender is in open neighbors\n self.OpenNeighbors.remove(sender) # remove sender from open neighbors...\n self.PseudoChildren.append(sender) # ... and add sender to pseudo children\n self.sendMessage(sender, 20, 'pseudo') # send pseudo message to sender\n return 0 # wait for next message\n\n elif data[0] == 'ready': # pseudo three generation finished\n\n self.EFluctuationCurve = copy.deepcopy(data[1]) # save fluctuation curve\n self.ERemainderLocal = copy.deepcopy(data[2]) # save initial global remainder\n if self.Children != []: # if not a leave node\n part_fluct = [0 for x in range(len(self.EFluctuationCurve))]\n for t in range(len(part_fluct)):\n part_fluct[t] = self.ERemainderLocal[t] / len(self.Children)\n for i in range(len(self.Children)):\n self.sendMessage(self.Children[i],20, ['ready', copy.deepcopy(self.EFluctuationCurve), copy.deepcopy(part_fluct)]) # inform all children that pseudo tree generation is ready\n #self.printPseudoTreeInfo()\n self.statusTree = 1 # pseudo tree generation for this node is finished\n # start load propagration optimization\n self.startTreeBasedCoord()\n return 0\n\n\n if self.OpenNeighbors: # if open neighbor(s) left\n NewChild = self.OpenNeighbors[0] # choose an open neighbor\n self.OpenNeighbors.remove(NewChild) # and remove it from the set of open neighbors\n # for reproducable results the one with the smallest ID is chosen here\n # TODO: chose child randomly\n self.Children.append(NewChild) # append chosen node to Children list\n self.sendMessage(NewChild, 20, ['child', self.OPTalg, self.fromTime, self.toTime, self.OPTcriterion]) # send child message to new child\n\n else: # no more open neighbors\n if self.Parent and self.noOfChildReplies == len(self.Neighbors)-1: # if current node is NOT the root and all neighbors except parent have sent a reply\n self.sendMessage(self.Parent, 20, ['child_ok']) #send a CHILD_OK message to parent node\n\n elif not self.Parent and self.noOfChildReplies == len(self.Neighbors): # if root node and all neighbors have sent a reply\n self.statusTree = 1 # pseudo tree generation for this node is finished\n for i in range(len(self.Children)):\n part_fluct = [0 for x in range(len(self.EFluctuationCurve))]\n for t in range(len(part_fluct)):\n part_fluct[t] = 
self.EFluctuationCurve[t] / len(self.Children)\n self.sendMessage(self.Children[i],20, ['ready', copy.deepcopy(self.EFluctuationCurve), copy.deepcopy(part_fluct)]) # inform all children that pseudo tree generation is ready\n\n #self.printPseudoTreeInfo()\n self.startTreeBasedCoord()",
"def messageHandler_PseudoTree(self, msg):\n data = msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received message {1} from ID {2}'.format(self.CommID, data, sender))\n if self.Neighbors and self.statusTree == 0: # if node has neighbors at all and is not finished\n if data[0] == 'child' and self.Visited == 0: # if first time current node is visited\n self.OpenNeighbors = copy.deepcopy(self.Neighbors) # open all neighbors of current node...\n self.OpenNeighbors.remove(sender) # ... except for sender node\n #if not self.OpenNeighbors:\n self.Visited = 1\n self.Parent = sender # sender node is parent of current node\n self.log_message('ID {0} has parent with ID {1}'.format(self.CommID, self.Parent))\n self.OPTalg = data[1] # save alg. type\n self.fromTime = data[2] # save fromTime\n self.toTime = data[3] # and toTime\n self.OPTcriterion = data[4] # and opt. criterion\n\n elif data == 'pseudo': # if message of type PSEUDO\n self.Children.remove(sender) # remove sender from children\n self.PseudoParents.append(sender) # and at sender to pseudo parents\n self.noOfChildReplies += 1\n\n elif data[0] == 'child_ok':\n self.noOfChildReplies += 1\n\n elif data[0] == 'child' and sender in self.OpenNeighbors: # if message is of type CHILD and sender is in open neighbors\n self.OpenNeighbors.remove(sender) # remove sender from open neighbors...\n self.PseudoChildren.append(sender) # ... and add sender to pseudo children\n self.sendMessage(sender, 20, 'pseudo') # send pseudo message to sender\n return 0 # wait for next message\n\n elif data[0] == 'ready': # pseudo three generation finished\n\n self.EFluctuationCurve = copy.deepcopy(data[1]) # save fluctuation curve\n self.ERemainderLocal = copy.deepcopy(data[2]) # save initial global remainder\n if self.Children != []: # if not a leave node\n part_fluct = [0 for x in range(len(self.EFluctuationCurve))]\n for t in range(len(part_fluct)):\n part_fluct[t] = self.ERemainderLocal[t] / len(self.Children)\n for i in range(len(self.Children)):\n self.sendMessage(self.Children[i],20, ['ready', copy.deepcopy(self.EFluctuationCurve), copy.deepcopy(part_fluct)]) # inform all children that pseudo tree generation is ready\n #self.printPseudoTreeInfo()\n self.statusTree = 1 # pseudo tree generation for this node is finished\n # start load propagration optimization\n self.startLoadPropagation()\n return 0\n\n\n if self.OpenNeighbors: # if open neighbor(s) left\n NewChild = self.OpenNeighbors[0] # choose an open neighbor\n self.OpenNeighbors.remove(NewChild) # and remove it from the set of open neighbors\n # for reproducable results the one with the smallest ID is chosen here\n # TODO: chose child randomly\n self.Children.append(NewChild) # append chosen node to Children list\n self.sendMessage(NewChild, 20, ['child', self.OPTalg, self.fromTime, self.toTime, self.OPTcriterion]) # send child message to new child\n\n else: # no more open neighbors\n if self.Parent and self.noOfChildReplies == len(self.Neighbors)-1: # if current node is NOT the root and all neighbors except parent have sent a reply\n self.sendMessage(self.Parent, 20, ['child_ok']) #send a CHILD_OK message to parent node\n\n elif not self.Parent and self.noOfChildReplies == len(self.Neighbors): # if root node and all neighbors have sent a reply\n self.statusTree = 1 # pseudo tree generation for this node is finished\n for i in range(len(self.Children)):\n part_fluct = [0 for x in range(len(self.EFluctuationCurve))]\n for t in range(len(part_fluct)):\n part_fluct[t] = 
self.EFluctuationCurve[t] / len(self.Children)\n self.sendMessage(self.Children[i],20, ['ready', copy.deepcopy(self.EFluctuationCurve), copy.deepcopy(part_fluct)]) # inform all children that pseudo tree generation is ready\n\n #self.printPseudoTreeInfo()\n self.startLoadPropagation()",
"def run(self, scheduler=\"single-threaded\"):\n _ = dask.compute(self.leaves, scheduler=scheduler)\n # when dask goes thru the tree, it knows the full sequence of ops\n # needed to compute each leaf, so this gives dask full authority in\n # determining the best dispatch path.",
"def scheduler(self):\n while True:\n if self.sch.empty():\n self.log.info(\"No scheduled jobs detected. Entering idle state\")\n bits = bitarray()\n # generate random 7B bitarrays\n for _ in range(pow(self.cube_dim,3)):\n bits.append(bool(random.getrandbits(1)))\n self.sch.enter(self.transmit_freq, 4, self.transmit, argument=(0, bits), kwargs={})\n else:\n try:\n self.log.info(\"Scheduled jobs detected. Serving through scheduler runner\")\n self.sch.run()\n except IOError as exc:\n self.log.exception(\"\"\"Scheduler runner encountered an error while executing the \n top level event: %s\"\"\", exc)\n sys.exit(1) # exit with status code 1",
"def simulate(self) -> (int, int):\n sums = [0 for i in range(10)]\n iteration = 0\n time = 0\n cost = 0\n\n # initialize queues list\n queue = [None for _ in self.processors]\n while len(self.done_tasks) < len(self.task_graph.nodes):\n iteration += 1\n \n # apply tasks to queues (processors)\n for qi, proc in enumerate(self.processors):\n if len(self.application[proc.index]) > 0 and queue[qi] == None:\n task = self.application[proc.index].pop(0)\n queue[qi] = ProcessingFactory.ScheduledTask(self.parser, proc, task)\n logging.debug(\"queue: \", qi, \" -> \", task)\n cost += queue[qi].getCost()\n \n # calculates min time after which something will happen to move clock\n min_construction_time = min(queue, key=lambda x: x.getTime() if x else MAX).getTime()\n min_time = min_construction_time\n\n for queue_index, queue_el in enumerate(queue):\n # if queue empty, not interesting\n if not queue_el:\n continue\n parents = self.task_graph.find_parents(queue_el.task)\n logging.debug(queue_el.task, parents)\n\n # if node has no parents, can be built\n if not parents:\n queue_el.enable()\n queue_el.passTime(min_time)\n \n\n # all parents have to be constructed\n if parents and set(parents).intersection(set(self.done_tasks)) == set(parents) and len(self.done_tasks) > 0:\n logging.debug(\"transfer needed\")\n \n transfer_time = 0\n # Resources from all parents have to be on this node to start construction.\n # Performs transfer logic with additional time required for that.\n all_here = True\n for parent in parents:\n ind = self.done_tasks.index(parent)\n if self.location[ind] != queue_index and ind in self.done_tasks:\n all_here = False\n transfer_time += self.task_graph.get_weight(parent, queue_el.task) / self.transfer[self.location[ind]][queue_index].throughput\n logging.debug(\"transfering:\", self.location[ind], ind, \" -> \", queue_index, ind)\n self.location[ind] = queue_index\n \n # If item can be bult enables it in queue with one step delay (to avoid passing time of transmission)\n queue_el.enable(True)\n \n # If all items are already waiting on current node, construction can be started\n if all_here:\n logging.debug(\"all resources in place!\")\n queue_el.enable()\n \n # If there was transfer move time according to time required by transfer, else\n # move time according to minimal construction time\n if queue_el and transfer_time > 0:\n min_time = transfer_time\n elif queue_el:\n min_time = min_construction_time\n \n # Pass time for each element in queue\n for queue_el in queue:\n if queue_el:\n queue_el.passTime(min_time)\n\n time += min_time\n logging.info(\"TIME:\", time, min_time, iteration, self.done_tasks)\n\n # Memory of left times in round buffer to avoid looping for infinity for impossible graphs\n sums[iteration%len(sums)] = sum([x.getTime() for x in queue if x])\n \n # If construction time has passed, item is constructed\n for i, e in enumerate(queue):\n if queue[i] != None and queue[i].getTime() <= 0:\n # Done has task which were finished, location has nodes on which resources are currently stored\n self.done_tasks.append(queue[i].task)\n self.location.append(i)\n queue[i] = None\n\n logging.debug(sums)\n # Checks if all average value of sums is equal to last element, \n # if yes output MAX, MAX which means that it cannot be performed in finite time.\n if sum(sums)/len(sums) == sums[-1]:\n return MAX, MAX\n return time, cost",
"def makeTree(plan, initialState):\n\n\t#The initial node of the behaviour tree\n\ttree = Sequence(\"Tree\")\n\n\t#Added the routines from the black board\n\ttree.add_child(global_vars.black_board.makeRoutines())\n\n\t#The node of the plan\n\tplanTask = Sequence(\"Plan\")\n\n\t#Initialize the first place where the robot starts\n\tlastPlace = global_vars.black_board.getRobotOrigin()\n\n\t#Set all the posible tasks in the black board to be executed\n\tglobal_vars.black_board.taskDone = [False for i in range(len(plan))]\n\n\tstate = copy.deepcopy(initialState)\n\n\t#For every task in the plan...\n\tfor i in range(len(plan)):\n\t\t#If the task is the movement task\n\t\tif plan[i][0] == global_vars.black_board.movementTask:\n\t\t\tcoord = global_vars.black_board.getCoords(plan[i]\n\t\t\t\t[global_vars.black_board.destArg])\n\t\t\tif coord != False:\n\t\t\t\t#Creates a super node to hold the task\n\t\t\t\tactionTask = Sequence(\"Action \" + str(i+1))\n\n\t\t\t\tfunction = hop.operators[plan[i][0]]\n\n\t\t\t\t#Creates a movement task and adds it to the actionTask\n\t\t\t\t#with the corresponding setDoneTask\n\t\t\t\tactionTask.add_child(goToTask(\"MoveToTask: \" +\n\t\t\t\t\tplan[i][global_vars.black_board.destArg], coord))\n\t\t\t\tactionTask.add_child(setDoneTask(\"SetDoneTask \"+ str(i+1), i,\n\t\t\t\t\tfunction, plan[i][1:]))\n\n\t\t\t\t#Updates the robot position\n\t\t\t\tlastPlace = plan[i][2]\n\n\t\t\t\tcheckDone = checkDoneTask(\"CheckDoneTask \"+ str(i+1), i, copy.deepcopy(state))\n\t\t\t\t#Adds a node that first checks if the task has been executed,\n\t\t\t\t#and if not executes it\n\t\t\t\tplanTask.add_child(Selector(\"Task \"+ plan[i][0], [checkDone, actionTask]))\n\t\t\t\tstate = function(copy.deepcopy(state), *plan[i][1:])\n\n\n\t\t\telse:\n\t\t\t\traise ValueError(\"Place not defined in the black board\")\n\n\t\t#If not is the movement task\n\t\telse:\n\t\t\t#Request the executable task to the black board\n\t\t\ttask = global_vars.black_board.getTask(plan[i][0])\n\t\t\tif task != False:\n\n\t\t\t\t#Creates a super node to hold the task\n\t\t\t\tactionTask = Sequence(\"Action \" + str(i+1))\n\n\t\t\t\tfunction = hop.operators[plan[i][0]]\n\n\t\t\t\t#Adds the task and his setDoneTask to the actionTask\n\t\t\t\tactionTask.add_child(task)\n\t\t\t\tactionTask.add_child(setDoneTask(\"SetDoneTask \"+ str(i+1), i,\n\t\t\t\t\tfunction, plan[i][1:]))\n\n\t\t\t\t#Subroutine to check the robots position and returns to the work place\n\t\t\t\tcoords = global_vars.black_board.getCoords(lastPlace)\n\t\t\t\tif coords != False:\n\n\t\t\t\t\tcheckLocation = checkLocationTask(lastPlace)\n\t\t\t\t\tmoveToLasPositionTask = goToTask(\"MoveToTaskLastPosition: \" + lastPlace, coords)\n\n\t\t\t\t\t#The subroutine first checks the location of the robot, and then if necesary moves it\n\t\t\t\t\tNavigationTask = Selector(\"NavSubroutine\", [checkLocation, moveToLasPositionTask])\n\n\t\t\t\t\t#Creates a node with all the executable leaf nodes\n\t\t\t\t\texecTask = Sequence(\"Executable\", [NavigationTask, actionTask])\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Place not defined in the black board\")\n\n\n\t\t\t\tcheckDone = checkDoneTask(\"CheckDoneTask \"+ str(i+1), i, copy.deepcopy(state))\n\t\t\t\t#Adds a node that first checks if the task has been executed,\n\t\t\t\t#and if not executes it\n\t\t\t\tplanTask.add_child(Selector(\"Task \"+ plan[i][0], [checkDone, execTask]))\n\t\t\t\tstate = function(copy.deepcopy(state), *plan[i][1:])\n\t\t\telse:\n\t\t\t\traise ValueError(\"Task not defined in the black 
board\")\n\n\t#Add the plan to the tree and returns it\n\ttree.add_child(planTask)\n\tglobal_vars.black_board.setReplan(False)\n\n\treturn tree",
"def generate_schedule(top_length, top_depth, left_length, left_depth):\n # The process of calculating the schedule starts from the leftmost\n # topmost element which is active from 0..top_depth timesteps.\n out = np.zeros((left_length, top_length, top_depth), dtype=\"i\")\n out[0][0] = np.arange(top_depth)\n\n # Fill the first col: Every column runs one \"step\" behind the column on\n # its left.\n for col in range(1, top_length):\n out[0][col] = out[0][col - 1] + 1\n\n # Fill the remaining rows. Similarly, all rows run one \"step\" behind the\n # row on their top.\n for row in range(1, left_length):\n out[row][0] = out[row - 1][0] + 1\n for col in range(1, top_length):\n out[row][col] = out[row][col - 1] + 1\n\n return out",
"def schedule_apply_nodes(v, apply_func, node_frame, inplace, outframe=..., ntype=...): # -> None:\n ...",
"def player_loop(self):\n\n # Generate game tree object\n first_msg = self.receiver()\n # Initialize your minimax model\n model = self.initialize_model(initial_data=first_msg)\n\n while True:\n msg = self.receiver()\n\n # Create the root node of the game tree\n node = Node(message=msg, player=0)\n\n # Possible next moves: \"stay\", \"left\", \"right\", \"up\", \"down\"\n best_move = self.search_best_next_move(\n model=model, initial_tree_node=node)\n\n # Execute next action\n self.sender({\"action\": best_move, \"search_time\": None})",
"def startTreeBasedCoord(self):\n self.log_message('ID {0} starts Load Propagation Optimization'.format(self.CommID))\n #self.MsgReceiveCount_interval = 0\n #self.MsgSendCount_interval = 0\n\n self.noOfTimesteps = (self.toTime - self.fromTime) / self.stepSize + 1\n\n # calculate pool of schedules (also saved in self.schedules) and schedule load curves\n if self.getTER1() != 0: # if not a gas boiler\n self.calcSchedulePool(self.fromTime, self.toTime)\n self.calcScheduleConsumptionCurves()\n\n if not self.Parent: #root node\n random.seed() # initialize pseudo random number generator\n\n\n\n # leave nodes select initial best schedule from schedule pool based on fluctuations curve and propagate their load to parent\n if not self.Children:\n #self.ChildrenProcessed = [0 for x in range(len(self.Children))]\n if self.getTER1() != 0: # not a gas boiler\n self.selectBestSchedule(copy.deepcopy(self.ERemainderLocal))\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(self.EConsumptionChosenSchedule)])\n else:\n zeros = [0 for x in range(len(self.ERemainderLocal))]\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(zeros)])\n\n else: # if not a leave node\n self.EConsumptionChildCurves = [ [0 for x in range(len(self.EFluctuationCurve))] for y in range(len(self.Children))] # initialize array for load curves of children\n self.EConsumptionChildCurvesRec = [ [0 for x in range(len(self.EFluctuationCurve))] for y in range(len(self.Children))] # initialize array for load curves of children\n self.noOfConsumptionCurvesReceived = 0\n #self.ChildLoadCurvesChosen = [0 for x in range(len(self.Children))]\n return",
"def simulate(self):\n\t\tcounter = 1\n\t\tweek = 1\n\t\twhile counter != 0:\n\t\t\tself.oneWeek(week)\n\t\t\tweek += 1\n\t\t\tcounter = len(self.teams[0].schedule)",
"def _schedule(self, when):\n sched = IScheduler(self.store)\n for scheduledAt in sched.scheduledTimes(self):\n # https://github.com/twisted/epsilon/issues/38\n if when._time < scheduledAt._time:\n sched.reschedule(self, scheduledAt, when)\n break\n else:\n sched.schedule(self, when)",
"def uplink_schedule(self):\n self.test.reset_sattest()\n self.test.zero_epoch() #TODO: we won't always do this - use real sat epoch\n self.test.add_schedule(self.cmds_list[:]) # add all of our commands\n\n for cmd in self.cmds_list:\n epoch_to_send = cmd.epoch # for relative, just subtract current sat epoch .. that's why we have a var\n #TODO: determine schedule time from now based on relative flag\n\n print(\"COMMAND: \", epoch_to_send, cmd.cmdid)\n Clock.schedule_once(partial(self.test.uplink, cmd.cmdid), int(epoch_to_send))\n Clock.schedule_once(partial(self.test.command_timeout, cmd.cmdid), epoch_to_send + cmd.timeout)",
"def _create_schedules(self):\n\n ''''''",
"def messageHandler_TreeBasedCoord(self, msg):\n data = msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received msg {1} from ID {2}'.format(self.CommID, data, sender))\n if data[0] == 'newload': # new load curve received by child\n\n for i in range(len(self.Children)): # save received child load curve\n if self.Children[i] == sender:\n for t in range(len(data[1])):\n self.EConsumptionChildCurvesRec[i][t] = copy.deepcopy(data[1][t])\n self.noOfConsumptionCurvesReceived = self.noOfConsumptionCurvesReceived +1\n break\n\n # if load curves received by all children\n if self.noOfConsumptionCurvesReceived == len(self.Children):\n self.noOfConsumptionCurvesReceived = 0 # reset counter for received load curves\n\n #first time all children have sent load curves\n if self.state_coordination == 0:\n self.state_coordination += 1\n consumption_curve = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n local_remainder = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n\n #accumulate children's loads\n for c in range(len(self.Children)):\n for t in range(len(self.EConsumptionChildCurves[0])):\n self.EConsumptionChildCurves[c][t] = self.EConsumptionChildCurvesRec[c][t]\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n # calculate the local remainder (without own load)\n for t in range(len(consumption_curve)):\n local_remainder[t] = self.ERemainderLocal[t] + consumption_curve[t]\n\n if self.getTER1() != 0: # if NOT a gas boiler\n #select own best schedule\n self.selectBestSchedule(local_remainder)\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #update local remainder with own load curve (global remainder)\n for t in range(len(consumption_curve)):\n local_remainder[t] += self.EConsumptionChosenSchedule[t]\n consumption_curve[t] += self.EConsumptionChosenSchedule[t]\n\n # save new global remainder\n self.ERemainderLocal = local_remainder\n\n if self.Parent: # not root\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40, ['localrecap', copy.deepcopy(self.ERemainderLocal)])\n else: #root\n # ask all children for a better compensation of the remainder in a new round\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40 , ['newround', copy.deepcopy(self.ERemainderLocal)])\n\n # any other round than first round\n else:\n\n idx_best_compensation = -1\n min_remainder = -1\n overall_min = -1\n overall_min_idx = -1\n local_remainder = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n abs_local_remainder = [0 for x in range(len(self.Children))]\n abs_global_remainder = 0\n max_min_diff_local_remainder = [0 for x in range(len(self.Children))]\n\n # calc current absolute global remainder\n for t in range(len(self.ERemainderLocal)):\n abs_global_remainder += abs(self.ERemainderLocal[t])\n max_min_diff_global_remainder = max(self.ERemainderLocal) - min(self.ERemainderLocal)\n\n for c in range(len(self.Children)):\n for t in range(len(local_remainder)):\n local_remainder[t] = self.ERemainderLocal[t] - self.EConsumptionChildCurves[c][t] + self.EConsumptionChildCurvesRec[c][t]\n abs_local_remainder[c] += abs(local_remainder[t])\n max_min_diff_local_remainder[c] = max(local_remainder) - min(local_remainder)\n\n if self.OPTcriterion == 'absremainder':\n #remember overall minimum\n if overall_min_idx == -1 or overall_min - abs_local_remainder[c] > 0.01:\n overall_min = abs_local_remainder[c]\n overall_min_idx = c\n\n if abs_global_remainder - 
abs_local_remainder[c] >= 1: # improvement greater or equal 1 Watt\n if idx_best_compensation == -1 or abs_local_remainder[c] < min_remainder:\n idx_best_compensation = c\n min_remainder = abs_local_remainder[c]\n\n elif self.OPTcriterion == 'maxmindiff':\n #remember overall minimum\n if overall_min_idx == -1 or overall_min - max_min_diff_local_remainder[c] > 0.01:\n overall_min = max_min_diff_local_remainder[c]\n overall_min_idx = c\n\n\n if max_min_diff_global_remainder - max_min_diff_local_remainder[c] > 0.01: # difference greater than 0.001 Watt\n if idx_best_compensation == -1 or max_min_diff_local_remainder[c] < min_remainder:\n idx_best_compensation = c\n min_remainder = max_min_diff_local_remainder[c]\n\n # no better compensation at all?\n if idx_best_compensation == -1:\n\n consumption_curve = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n self.log_message('ID {0}: did not receive an improvement by any of its children.'.format(self.CommID))\n\n for c in range(len(self.Children)):\n #send fallback to all children\n if not self.Parent: #root\n self.sendMessage(self.Children[c], 40, 'fallbackforward')\n else: #not root\n self.sendMessage(self.Children[c], 40, 'fallback')\n # calculate current load curve\n for t in range(len(self.ERemainderLocal)):\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n if self.getTER1() != 0: #NOT a gas boiler\n for t in range(len(consumption_curve)):\n consumption_curve[t] += self.EConsumptionChosenSchedule[t] #add own load to load curve\n\n if self.Parent: #not root --> propagate load curve to parent\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(consumption_curve)])\n\n else: #root\n # if self.noNewRounds < len(self.Children):\n # # tentatively integrate minimal max-min-diff load curve to remainder\n # tentative_remainder = [0 for x in range(len(self.ERemainderLocal))]\n # random_child = random.randint(0, len(self.Children)-1)\n # for t in range(len(tentative_remainder)):\n # tentative_remainder[t] = self.ERemainderLocal[t] - self.EConsumptionChildCurves[random_child][t] + self.EConsumptionChildCurvesRec[random_child][t]\n #\n # for c in range(len(self.Children)):\n # self.sendMessage(self.Children[c], 40, ['newround', copy.deepcopy(tentative_remainder)])\n # self.noNewRounds += 1\n # else:\n # finish algorithm\n self.state_coordination = 9999\n #self.plotDebugInfo(load_curve)\n\n else:\n self.noNewRounds = 0\n # send fallback message to all children except the one that has the best improving load curve\n self.log_message('ID {0}: best compensation is from child {1}'.format(self.CommID, self.Children[idx_best_compensation]))\n #raw_input('press a key')\n for c in range(len(self.Children)):\n if c != idx_best_compensation:\n if not self.Parent: #root\n self.sendMessage(self.Children[c], 40, 'fallbackforward')\n else: #not root\n self.sendMessage(self.Children[c], 40, 'fallback')\n\n # update remainder and save new child load curve\n for t in range(len(self.EConsumptionChildCurves[0])):\n self.ERemainderLocal[t] = self.ERemainderLocal[t] - self.EConsumptionChildCurves[idx_best_compensation][t] + self.EConsumptionChildCurvesRec[idx_best_compensation][t]\n self.EConsumptionChildCurves[idx_best_compensation][t] = self.EConsumptionChildCurvesRec[idx_best_compensation][t]\n\n # update remainder with own new load if not a gas boiler\n if self.getTER1() != 0:\n # select own new load\n\n remainder_without_own_load = [0 for x in range(len(self.ERemainderLocal))]\n for t in range(len(remainder_without_own_load)):\n 
remainder_without_own_load[t] = self.ERemainderLocal[t] - self.EConsumptionChosenSchedule[t]\n\n self.selectBestSchedule(self.ERemainderLocal)\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n for t in range(len(remainder_without_own_load)):\n self.ERemainderLocal[t] = remainder_without_own_load[t] + self.EConsumptionChosenSchedule[t]\n\n # start new round\n self.state_coordination += 1\n for c in range(len(self.Children)):\n if not self.Parent: #root\n self.sendMessage(self.Children[c], 40, ['newround', copy.deepcopy(self.ERemainderLocal)])\n else: #not root\n self.sendMessage(self.Children[c], 40, ['localrecap', copy.deepcopy(self.ERemainderLocal)])\n\n\n elif data == 'fallback':\n if self.getTER1() != 0: # NOT a gas boiler\n self.chosenScheduleIndex = self.prevChosenScheduleIndex\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n self.log_message('ID {0} has performed fallback to schedule {1}'.format(self.CommID, self.chosenScheduleIndex))\n else:\n self.log_message('ID {0} is GB (no schedule to fallback to)'.format(self.CommID))\n\n\n elif data == 'fallbackforward':\n if self.getTER1() != 0: # NOT a gas boiler\n self.chosenScheduleIndex = self.scheduleIdxOfPreviousRound\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n # save previous load curve\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n self.log_message('ID {0} has performed fallback to schedule {1}'.format(self.CommID, self.chosenScheduleIndex))\n else:\n self.log_message('ID {0} is GB (no schedule to fallback to)'.format(self.CommID))\n\n #inform all children about fallback\n if self.Children:\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40, 'fallbackforward')\n\n\n elif data[0] == 'newround':\n self.ERemainderLocal = copy.deepcopy(data[1])\n self.state_coordination = 0\n if self.getTER1() != 0: #if not a gas boiler\n #remember schedule before starting a new round\n self.scheduleIdxOfPreviousRound = self.chosenScheduleIndex\n\n if self.Children: # NOT a leave node\n # forward compensation curve to all children\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40, ['newround', copy.deepcopy(self.ERemainderLocal)])\n else: #leave node\n if self.getTER1() != 0: # not a gas boiler\n #remainder_without_own_load = [0 for x in range(len(self.ERemainderLocal))]\n #for t in range(len(remainder_without_own_load)):\n # remainder_without_own_load[t] = self.ERemainderLocal[t] - self.EConsumptionChosenSchedule[t]\n self.selectBestSchedule(self.ERemainderLocal)\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(self.EConsumptionChosenSchedule)])\n else:\n zeros = [0 for x in range(len(self.ERemainderLocal))]\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(zeros)])\n\n elif data[0] == 'localrecap':\n self.ERemainderLocal = copy.deepcopy(data[1])\n consumption_curve = [0 for x in range(len(self.ERemainderLocal))]\n\n if self.getTER1() != 0: #NOT a gas boiler\n #remainder_without_own_load = [0 for x in 
range(len(self.ERemainderLocal))]\n #for t in range(len(remainder_without_own_load)):\n # remainder_without_own_load[t] = self.ERemainderLocal[t] - self.EConsumptionChosenSchedule[t]\n\n\n self.selectBestSchedule(copy.deepcopy(self.ERemainderLocal))\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n if self.Children: # NOT a leave node\n for c in range(len(self.Children)):\n for t in range(len(consumption_curve)):\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n for t in range(len(consumption_curve)): # add own load\n consumption_curve[t] += self.EConsumptionChosenSchedule[t]\n\n else: #gas boiler\n\n if self.Children: # NOT a leave node\n for c in range(len(self.Children)):\n for t in range(len(consumption_curve)):\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(consumption_curve)])",
"def call_schedule(self, bot, update):\n bot.send_message(update.message.chat_id, '_1 пара_ 08:30 - 10:05\\n'\n '_2 пара_ 10:25 - 12:00\\n'\n '_3 пара_ 12:20 - 13:55\\n'\n '_4 пара_ 14:15 - 15:50\\n'\n '_5 пара_ 16:10 - 17:45',\n parse_mode='Markdown')",
"def apply(tree, parameters=None):\r\n from pm4py.objects.bpmn.bpmn_graph import BPMN\r\n counts = Counts()\r\n bpmn = BPMN()\r\n start_event = BPMN.StartEvent(name=\"start\", isInterrupting=True)\r\n end_event = BPMN.EndEvent(name=\"end\")\r\n bpmn.add_node(start_event)\r\n bpmn.add_node(end_event)\r\n bpmn, counts, _, _ = recursively_add_tree(tree, tree, bpmn, start_event, end_event, counts, 0)\r\n bpmn = delete_tau_transitions(bpmn, counts)\r\n\r\n return bpmn",
"def get_move(self, state):\n # this method should only be called when self is real root.,so that's here where we can should use mutiprocess\n if self._root.is_leaf(): # no expanded children yet\n action_probs, _ = self._policy(state)\n self._root.expand(action_probs)\n\n the_children = self._root._children\n i = 0\n sorted_children = sorted(the_children.items(), key=lambda act_node: act_node[1].get_value(self._c_puct))\n for child_node in sorted_children:\n i += 1\n child_tree = MCTS(policy_value_fn,root=child_node[1])\n state_copy = copy.deepcopy(state)\n state_copy.do_move(child_node[0])\n visits_count = 0\n for j in range(0,relu(1200-i*20),10): # at least run one time\n child_tree._playout(copy.deepcopy(state_copy))\n visits_count += 1\n self._root.update(-child_tree.get_root_node().last_leafvalue,visits_count=visits_count) # update real root\n child_tree.get_root_node().set_parent(self._root) # to link the sub tree\n\n '''\n for n in range(self._n_playout):\n # get top n (assumed to be 6) nodes from children\n # step1 let all children of root have chance to run in parallel\n # adjust the round count of children by value\n if n%6 == 0:\n the_children = self._root._children\n top_n = sorted(the_children.items(),key=lambda act_node: act_node[1].get_value(self._c_puct))[:6]\n for child_node in top_n:\n # child_tree = MCTS(policy_value_fn,copy.deepcopy(child_node)) # use copy because we will use it in multiprocess\n child_tree = MCTS(policy_value_fn,\n child_node) \n state_copy = copy.deepcopy(state)\n state_copy.do_move(child_node[0])\n child_tree._playout(state_copy)\n self._root.update(-child_tree.get_root_node().last_leafvalue) # update real root\n child_tree.get_root_node().set_parent(self._root) # to link the sub tree\n # self._root.get_children()[child_node[0]] = child_tree.get_root_node() # copy sub tree\n '''\n\n '''\n return max(self._root._children.items(),\n # key=lambda act_node: act_node[1].get_visits())[0]\n key=lambda act_node: act_node[1].get_value(self._c_puct))[0]\n '''\n\n for n in range(300):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n return max(self._root._children.items(),\n key=lambda act_node: act_node[1].get_value(self._c_puct))[0]",
"def act(state: Tree) -> None:\n cur_state = state.value\n state.children = [Tree(cur_state.make_move(m)) for m in\n cur_state.get_possible_moves()]",
"def run(self):\n IScheduler(self.subStore).tick()",
"def _transientSchedule(self, when, now):\n if self.store.parent is not None:\n subStore = self.store.parent.getItemByID(self.store.idInParent)\n hook = self.store.parent.findOrCreate(\n _SubSchedulerParentHook,\n subStore=subStore)\n hook._schedule(when)",
"def transmission_scheduler(self, ap_index:int):\n # sched_load = False\n # next_transmission_time = 0\n # current_sq = self.rec_reg.read()[ap_index]\n \n # for i in range(len(self.sch.queue)):\n # if self.sch.queue[i][1] == 4:\n # sched_load = True\n # next_transmission_time = self.sch.queue[i][0]\n # break\n \n # sched_args = list()\n # time_deadlines = list()\n \n # if sched_load:\n # time_sum = next_transmission_time + self.transmit_freq/2 \n # else:\n # time_sum = time.time()\n \n # #prebuild a list of transmission events and times for efficient entry into the scheduler\n # while True:\n # # delay added at start to avoid race between transmit() trying to read from the queue \n # # and the scheduler filling the queue\n # time_sum += self.transmit_freq\n # try:\n # # session queue of type bitarray\n # sched_args.append(current_sq.next())\n # # delete session queue object when the full queue is added to the scheduler\n # except ValueError:\n # # disconnect signal for transmit\n # time_deadlines.append(time_sum)\n # sched_args.append(None)\n # break\n # time_deadlines.append(time_sum)\n \n # #enter transmission events into the scheduler\n # for i in enumerate(time_deadlines):\n # self.sch.enterabs(time_deadlines[i], 4, self.transmit, \n # argument=(ap_index,sched_args[i]), kwargs={})\n # #print_queue(self.s.queue)\n # self.log.info(\"Scheduled transmission events for AP: %s\", ap_index)\n # self.log.info(\"Estimated transmission duration (s): %s\", \n # self.sch.queue[len(self.sch.queue)-1][0]-self.sch.queue[0][0])",
"def make_annealing_schedule(num_rows, epochs, sample_tree_rate):\n assert epochs >= 1.0\n assert sample_tree_rate >= 1.0\n # Randomly shuffle rows.\n row_ids = list(range(num_rows))\n np.random.shuffle(row_ids)\n row_to_add = itertools.cycle(row_ids)\n row_to_remove = itertools.cycle(row_ids)\n\n # Use a linear annealing schedule.\n epochs = float(epochs)\n add_rate = epochs\n remove_rate = epochs - 1.0\n state = 2.0 * epochs\n\n # Sample the tree sample_tree_rate times per batch.\n num_assigned = 0\n next_batch = 0\n while num_assigned < num_rows:\n if state >= 0.0:\n yield 'add_row', next(row_to_add)\n state -= remove_rate\n num_assigned += 1\n next_batch -= sample_tree_rate\n else:\n yield 'remove_row', next(row_to_remove)\n state += add_rate\n num_assigned -= 1\n if num_assigned > 0 and next_batch <= 0:\n yield 'sample_tree', None\n next_batch = num_assigned",
"def _tree_update(self, new_tree: Tree, tree_update_time: datetime):\n self.__tree_update_time = tree_update_time\n self.__new_tree = new_tree\n self.__new_event_types_listeners = self._register_event_listeners(self.__new_tree)\n self.__is_simultaneous_state = True",
"def _downward(self, root=0):\n\n if self._verbosity > 0:\n print(\"sending messages towards the leaf nodes\", end=\"\", flush=True)\n ready_to_send = set([root])\n while len(ready_to_send) > 0:\n current = ready_to_send.pop()\n self.clique_beliefs[current] = self._calc_message(current, self.children[current], False)\n ready_to_send.update(self.children[current])\n if self._verbosity > 0:\n print(\".\", end=\"\", flush=True)\n if self._verbosity > 0:\n print(\"\", end=\"\\n\", flush=True)"
]
| [
"0.6145357",
"0.5590652",
"0.5394166",
"0.5393599",
"0.538951",
"0.53780746",
"0.53780746",
"0.5376024",
"0.53468835",
"0.5345341",
"0.5336988",
"0.5289542",
"0.5259551",
"0.5243181",
"0.52406275",
"0.52231073",
"0.5221868",
"0.52196497",
"0.51684105",
"0.51681745",
"0.51432544",
"0.51329094",
"0.50637203",
"0.50287455",
"0.5022232",
"0.5017196",
"0.5008896",
"0.4990699",
"0.49792412",
"0.49661493"
]
| 0.5650114 | 1 |
Remove edge at position e from tree and update data structures. | def remove_edge(self, e):
assert len(self.e2k) == self.VEK[1]
assert len(self.k2e) == self.VEK[1]
neighbors = self.neighbors
components = self.components
k = self.e2k.pop(e)
self.k2e.pop(k)
v1, v2 = self.grid[1:, k]
neighbors[v1].remove(v2)
neighbors[v2].remove(v1)
stack = [v1]
while stack:
v1 = stack.pop()
components[v1] = True
for v2 in neighbors[v1]:
if not components[v2]:
stack.append(v2)
assert len(self.e2k) == self.VEK[1] - 1
assert len(self.k2e) == self.VEK[1] - 1
return k | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_edge(self, edge: Edge) -> Edge:",
"def remove_edge(self, edge):\n if self.get_edge(edge):\n del self[edge[0]][edge[1]]\n del self[edge[1]][edge[0]]",
"def _del(self, handle=\"\", node=\"\", edge=\"\", subg=\"\"):\n head, tail = '', ''\n if edge:\n head, tail = edge\n\n node, head, tail, subg = map(encode_page, [node, head, tail, subg])\n\n self.changed = 1\n if head and tail:\n item = gv.findedge(gv.findnode(handle, head),\n gv.findnode(handle, tail))\n elif node:\n item = gv.findnode(handle, node)\n elif subg:\n item = gv.findsubg(handle, subg)\n elif handle:\n item = handle\n else:\n raise ValueError(\"No graph element or element type specified\")\n if item:\n gv.rm(item)",
"def delete_branch(self, e=0):\n ### TODO: debug\n #self.show_debug()\n # delete childs\n mask = str(self.n_parent) + \":\" + str(self.n)\n for k, v in self.d.items():\n if str(k).startswith(mask):\n self.d.pop(k)\n\n # renum numbers above\n\n level = len(str(self.n_parent).split(\":\")) + 1\n self.renum_branches_minus(level, self.n)\n\n # delete current item\n #self.n = self.n\n #mask_next = str(self.n_parent) + \":\" + str(self.n)\n #if self.d.get(mask_next):\n # self.set_value(self.n_parent, self.n)\n #else:\n # self.next_item()\n self.clear_controls()\n mask = str(self.n_parent) + \":\" + str(self.n)\n if self.d.get(mask):\n self.set_value(self.n_parent, self.n)\n else:\n self.second_main_text.SetValue('')\n self.second_mnemo_text.SetValue('')\n # set previous\n try:\n mask = str(self.n_parent) + \":\" + str(self.n - 1)\n self.first_main.SetLabelText(self.d[mask][0])\n self.first_mnemo.SetLabelText(self.d[mask][1])\n except:\n pass\n self.set_arrows()",
"def remove(self, e):\n p = self._find_position(e) # try to locate existing element\n if p is not None:\n self._data.delete(p) # delete if found",
"def remove_edge(self):\r\n (face, cycle) = self.find_girth()\r\n \r\n k = len(face)\r\n assert k >= 2, \"length of face less than 2\"\r\n\r\n e_id = face[0]\r\n self.graph.remove_edge(e_id)\r\n a = cycle[0]\r\n b = cycle[1]\r\n e1 = self.graph.smooth_vertex(a)\r\n #(x1, y1) = self.graph.get_edge(e1).get_endpoints()\r\n #removed_1st = (a, x1, y1)\r\n e2 = self.graph.smooth_vertex(b)\r\n #(x2, y2) = self.graph.get_edge(e2).get_endpoints()\r\n #removed_2nd = (b, x2, y2)\r\n #\r\n # e1 = x --- a --- x\r\n # |e_id\r\n # e2 = x --- b --- x\r\n #\r\n # ( (v_id1,(x1,y1)), (v_id2,(x1,y1)) )\r\n #self.edge_removed_info.append((removed_1st, removed_2nd))\r\n self.state = \"initial\"\r\n self.removed.append((e1, e2, cycle))\r\n #print \"removed: \", (e1, e2, cycle)\r",
"def removeChild(self, edge):\n del self.child_edges[edge.getId()]",
"def bipartition_by_edge(self, e):\n\n t = self._tree\n nr = e.head_node\n assert e.tail_node is not None\n assert e.head_node is not None\n assert nr.parent_node is e.tail_node\n is_valid_tree(t)\n\n potentially_deleted_nd = e.tail_node\n grandparent_nd = potentially_deleted_nd.parent_node\n e.tail_node.remove_child(nr, suppress_unifurcations=True)\n\n nr.edge.length = None\n nr.parent_node = None\n convert_node_to_root_polytomy(nr)\n t1 = PhylogeneticTree(Tree(seed_node=nr))\n # temp we could speed this up,\n # by telling the Phylogenetic tree how many leaves it has\n n1 = t1.n_leaves\n\n if hasattr(e, \"num_leaves_below\"):\n if grandparent_nd is None:\n old_root = potentially_deleted_nd\n if old_root.edge:\n old_root.edge.num_leaves_below -= n1\n else:\n if potentially_deleted_nd in grandparent_nd.child_nodes():\n potentially_deleted_nd.edge.num_leaves_below -= n1\n old_root = grandparent_nd\n if old_root.edge:\n old_root.edge.num_leaves_below -= n1\n while old_root.parent_node:\n old_root = old_root.parent_node\n if old_root.edge:\n old_root.edge.num_leaves_below -= n1\n else:\n old_root = grandparent_nd or potentially_deleted_nd\n while old_root.parent_node:\n old_root = old_root.parent_node\n\n t2 = PhylogeneticTree(Tree(seed_node=old_root))\n\n is_valid_tree(t1._tree)\n is_valid_tree(t2._tree)\n return t1, t2",
"def remove_edges(self, node: NodeKey) -> Edge:",
"def remove_edge(self, u, v):\n \n try:\n del self.prefix[v][u]\n del self.suffix[u][v]\n except:\n print(\"ERROR: The edges not in graph\")",
"def remove_Edge(self, rm_edge_list):\n self.G.remove_edges_from(rm_edge_list)\n self.G.edges()\n return self.G",
"def remove_edge(self, id):\r\n\t\tedge = self._edges[id]\r\n\t\tlabel = edge.label\r\n\t\tdel edge.start_node._edges[label]\r\n\t\tdel edge.end_node._edges[label]\r\n\t\tdel self._edges[id]",
"def removeNode(self, node):",
"def removeEdge(self, edge: Edge):\n if edge in self.edges:\n self.edges.remove(edge)\n else:\n print('!W', 'Scene:removeEdge', 'wanna remove edge', edge, 'from self.edges but it is not in the list!')",
"def remove(self,node,verbose=False):\n for label,parent in node.incoming:\n parent.outgoing.remove((label,node))\n for label,child in node.outgoing:\n child.incoming.remove((label,node))\n self.pop(node.nodeid)\n for x,y in copy(self.undirected):\n if x == node or y == node:\n self.undirected.remove((x,y))\n if self.root == node:\n self.root = None\n if verbose: print('removed',node)",
"def del_edge (self, src, dst):\n raise NotImplementedError",
"def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()",
"def delete(self):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant delete abstract elements')\r\n if self.eid is None:\r\n return self\r\n query = \"\"\"\r\n e = g.e(eid)\r\n if (e != null) {\r\n g.removeEdge(e)\r\n g.stopTransaction(SUCCESS)\r\n }\r\n \"\"\" \r\n results = execute_query(query, {'eid':self.eid})",
"def remove_node(self, node):\n node.pre.post = node.post\n node.post.pre = node.pre",
"def remove_node(self, node):\n \n try:\n num_of_edge = len(self.prefix[node]) + len(self.suffix[node])\n self.node_set.remove(node)\n \n # remove edge associated with the node\n for key in self.prefix[node]:\n \n self.suffix[key].pop(node)\n \n for key in self.suffix[node]:\n \n self.prefix[key].pop(node)\n \n self.prefix.pop(node)\n self.suffix.pop(node)\n \n self.num_node -= 1\n self.edges -= num_of_edge\n \n except:\n print(\"ERROR: No node found.\")",
"def remove_edge(self, edge_name, ignore_if_absent=False):\n if edge_name in self.edges:\n del self.edges[edge_name]\n self.build_order.remove(edge_name)\n else:\n if not ignore_if_absent:\n raise ValueError(\n \"Attempted to remove the edge \" + edge_name + \", which doesn't exist.\")\n else:\n pass",
"def remove_edge(self, rtype, node1, node2):\n self.nodes[node1].remove_relation(rtype,node2)\n self.nodes[node2].remove_predecessor(rtype,node1)\n self.dirty = True",
"def removeLabel(edge):\n return edge[:-2]",
"def removeLabel(edge):\n return edge[:-2]",
"def remove_edge_root(self):\n\n to_remove = None\n\n # Find this in the parent's branch list\n if self.parent_root:\n for branch_tuple in self.parent_root.branch_list:\n if branch_tuple[1] is self:\n to_remove = branch_tuple\n break\n\n if to_remove:\n self.parent_root.branch_list.remove(to_remove)\n\n to_remove = None\n\n # Find this in the parent's endpoint branch list\n if self.parent_root:\n for branch in self.parent_root.branches_at_endpoint:\n if branch is self:\n to_remove = branch\n break\n\n if to_remove:\n self.parent_root.branches_at_endpoint.remove(to_remove)\n\n if self.parent_root.branches_at_endpoint:\n return None\n else:\n return self.parent_root",
"def remove_edge(G, u, v):\n h = G.copy()\n h.remove_edge(u, v)\n return h",
"def remove_edge(self, u, v):\r\n elements = self.d.keys()\r\n #check for nodes in graph\r\n if u not in elements or v not in elements:\r\n raise KeyError(str(u) + \" and \" + str(v) + \" must be in graph\")\r\n #remove other node from each value set\r\n self.d[u].remove(v)\r\n self.d[v].remove(u)",
"def remove_edge(self, current_id):\n if current_id is None:\n raise Exception('Edge ID can not be None')\n if current_id not in self.edges:\n raise Exception('Edge ID does not exist')\n\n # current_edge = self.edges[current_id]\n # out_vertex=current_edge.get_vertex(Direction.OUT)\n # in_vertex=current_edge.get_vertex(Direction.IN)\n # if out_vertex and out_vertex.out_edges:\n # for edge in out_vertex.out_edge:\n self.edges.pop(current_id, None)",
"def remove_edge(\n self, subject_node: str, object_node: str, edge_key: Optional[str] = None\n ) -> None:\n self.graph.remove_edge(subject_node, object_node, edge_key)",
"def delete(self, edge: QuadEdge) -> None:\n if edge is self.right_side or edge.opposite is self.right_side:\n assert (self.right_side\n is not self.right_side.right_from_end.opposite)\n self.right_side = self.right_side.right_from_end.opposite\n if edge is self.left_side or edge.opposite is self.left_side:\n assert self.left_side is not self.left_side.left_from_start\n self.left_side = self.left_side.left_from_start\n edge.delete()"
]
| [
"0.71544826",
"0.6813152",
"0.6589992",
"0.6541133",
"0.64621186",
"0.6431649",
"0.6385933",
"0.6373629",
"0.63383347",
"0.62092847",
"0.62010515",
"0.6177546",
"0.61467075",
"0.6116178",
"0.6074385",
"0.60594064",
"0.6028453",
"0.5965395",
"0.5951493",
"0.584998",
"0.5843794",
"0.5837668",
"0.58336145",
"0.58336145",
"0.580674",
"0.5764052",
"0.5760051",
"0.5733592",
"0.5732474",
"0.5727242"
]
| 0.7103287 | 1 |
Add edge k at location e and update data structures. | def add_edge(self, e, k):
assert len(self.e2k) == self.VEK[1] - 1
assert len(self.k2e) == self.VEK[1] - 1
v1, v2 = self.grid[1:, k]
assert self.components[v1] != self.components[v2]
self.k2e[k] = e
self.e2k[e] = k
self.neighbors[v1].add(v2)
self.neighbors[v2].add(v1)
self.components[:] = False
assert len(self.e2k) == self.VEK[1]
assert len(self.k2e) == self.VEK[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_edge(self, e):\n v, w = e\n self[v][w] = e\n self[w][v] = e",
"def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e",
"def addEdge(self, e):\n v = e.either()\n w = e.other(v)\n self._validateVertex(v)\n self._validateVertex(w)\n self._adj[v].add(e)\n self._adj[w].add(e)\n self._E += 1",
"def add_edge(self, s, e):\n self.graph[s].append(e)",
"def add_edge(self, edge):\n self[edge[0]][edge[1]] = edge\n self[edge[1]][edge[0]] = edge",
"def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)",
"def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)",
"def addEdge(self,u,v):\r\n self.graph[u].append(v)",
"def add_edge(i, j):\n if (i, j) in edges or (j, i) in edges:\n # Si ya esta agregado en la lista no agrega nada\n return\n edges.add( (i, j) )\n edge_points.append(points[ [i, j] ])",
"def add_edge(self, u, v):\r\n keys = self.d.keys()\r\n #if nodes are not in graph, add them\r\n if u not in keys:\r\n self.add_node(u)\r\n if v not in keys:\r\n self.add_node(v)\r\n #add each node to the value set of each other\r\n u_old = self.d[u]\r\n u_new = u_old.union(set(str(v)))\r\n v_old = self.d[v]\r\n v_new = v_old.union(set(str(u)))\r\n self.d.update({u:u_new, v:v_new})",
"def add_edge(self, ed):\n self.edge.append(ed)\n self.update_node2edge()",
"def add_edge(self, u, v):\n self.graph[u].append(v)",
"def addEdge(self,x,y):\n\t\tself._dict[x].append(y)",
"def add_edge(self, src_key, dest_key, weight=1):\n self.vertices[src_key].add_neighbour(self.vertices[dest_key], weight)",
"def add_edge(self, edge):\n if(self.has_edge(edge) == 0):\n self.__graph_dict[edge[0]].append(edge[1])",
"def add_edge(self, key1, key2, weight=0):\n\n \n if key1 not in self.graph and key2 not in self.graph:\n raise ValueError(\"Both Vertex of keys {} and {} not in Graph\".format(key1, key2))\n elif key1 not in self.graph or key2 not in self.graph:\n raise ValueError(\"Either Vertex of keys {} and {} not in Graph\".format(key1, key2))\n\n elif key1 == key2:\n raise ValueError(\"Vertex {} can't be its own neighbor\".format(key1))\n else:\n # Get the two neighbor verteces\n vertex_one = self.graph[key1]\n vertex_two = self.graph[key2]\n\n # Code idea from Vicenzo : https://github.com/C3NZ/CS22/blob/master/challenges/graph.py#L77\n added_from = vertex_one.add_neighbor(vertex_two, weight)\n added_to = vertex_two.add_neighbor(vertex_one, weight)\n\n if added_from and added_to:\n self.edges += 1",
"def add_edge(self, u, v, w):\n self.adj[u].append((v, w))",
"def edges(self, e):\n self._edges = e",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]",
"def add_edge(G, G2, node_names, key, destination):\n ConfigurationDag.check_node_exists(node_names, destination)\n logging.debug(\"Adding edge %s -> %s\" % (key, destination))\n G.add_edge(key, destination)\n G2.add_edge(key, destination)",
"def add_edge(self, u, v, val):\n raise NotImplementedError()",
"def add_edge(self, e):\n x = min(e)\n y = max(e)\n if x not in self._vertices:\n self.add_vertex(x)\n if y not in self._vertices:\n self.add_vertex(y)\n self._edges.add( (x, y) )",
"def add_edge(self, v1, v2):\n pass # TODO",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]",
"def add_edge_directed(u, v):\n adj[u].append(v)",
"def add_edge_directed(u, v):\n adj[u].append(v)",
"def connect(self, u, v):\n self.e[u].add(v)\n self.e[v].add(u)",
"def addEdge(self,u,v,w):\r\n self.graph.append([u,v,w])",
"def addEdge(source, target):\n\n\t\t# append the edge which contain source and target to the graph defaultdict\n\t\tgraph[source].append(target)\n\n\t\t\"\"\"initialize reference dictionary for each node\"\"\"\n\n\t\t# append the source as key and 0 as value to the reference dictionary\n\t\treference[source] = [0, 0, False, False]",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 not in self.__graph_dict:\n self.__graph_dict[vertex1] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex1)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict:\n self.__graph_dict[vertex2] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex2)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict[vertex1]:\n self.__graph_dict[vertex1].append(vertex2)\n dbg_str = \"Appending .. \" + str(vertex2), \"to ->\" +str(vertex1)\n # logging.debug(dbg_str)\n\n if vertex1 not in self.__graph_dict[vertex2]:\n self.__graph_dict[vertex2].append(vertex1)\n dbg_str = \"Appending .. \" + str(vertex1), \"to ->\" +str(vertex2)\n # logging.debug(dbg_str)"
]
| [
"0.75295997",
"0.7464711",
"0.6864501",
"0.6483626",
"0.62846744",
"0.6208539",
"0.6208539",
"0.6100469",
"0.6099873",
"0.60953695",
"0.608646",
"0.6033433",
"0.6003985",
"0.5998347",
"0.59671044",
"0.5966964",
"0.5939195",
"0.5916484",
"0.5904478",
"0.58792466",
"0.58771837",
"0.5873838",
"0.5863216",
"0.58566844",
"0.58006793",
"0.58006793",
"0.5780108",
"0.57622457",
"0.5743075",
"0.574076"
]
| 0.8439938 | 0 |
Sample a random spanning tree of a dense weighted graph using MCMC. This uses Gibbs sampling on edges. Consider E undirected edges that can move around a graph of V=1+E vertices. The edges are constrained so that no two edges can span the same pair of vertices and so that the edges must form a spanning tree. To Gibbs sample, choose one of the E edges at random and move it anywhere else in the graph. After we remove the edge, notice that the graph is split into two connected components. The constraints imply that the edge must be replaced so as to connect the two components. Hence to Gibbs sample, we collect all such bridging (vertex,vertex) pairs and sample from them in proportion to exp(edge_logits). | def sample_tree(grid, edge_logits, edges, steps=1):
logger.debug('sample_tree sampling a random spanning tree')
COUNTERS.sample_tree_calls += 1
if len(edges) <= 1:
return edges
tree = MutableTree(grid, edges)
V, E, K = tree.VEK
for step in range(steps):
for e in range(E):
e = np.random.randint(E) # Sequential scanning doesn't work.
k1 = tree.remove_edge(e)
valid_edges = np.where(
tree.components[grid[1, :]] != tree.components[grid[2, :]])[0]
valid_probs = edge_logits[valid_edges]
valid_probs -= valid_probs.max()
np.exp(valid_probs, out=valid_probs)
total_prob = valid_probs.sum()
if total_prob > 0:
valid_probs *= 0.9999995 / total_prob # Avoid np.binom errors.
k2 = valid_edges[sample_from_probs(valid_probs)]
else:
k2 = k1
COUNTERS.sample_tree_infeasible += 1
tree.add_edge(e, k2)
COUNTERS.sample_tree_propose += 1
COUNTERS.sample_tree_accept += (k1 != k2)
HISTOGRAMS.sample_tree_log2_choices.update(
[len(valid_edges).bit_length()])
edges = sorted((grid[1, k], grid[2, k]) for k in tree.e2k.values())
assert len(edges) == E
return edges | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_sample(G):\n E = collections.defaultdict(list) # to store the new sampled preference list\n for student in G.A:\n pref_list = G.E[student]\n E[student] = pref_list[:] # store the pref list of student in E\n for elective in pref_list:\n E[elective].append(student)\n\n for elective in G.B:\n random.shuffle(G.E[elective])\n return graph.BipartiteGraph(G.A, G.B, E, G.capacities)",
"def subsample_graph(graph, max_degree,\n rng):\n edges = sampler.get_adjacency_lists(graph)\n edges = sampler.sample_adjacency_lists(edges, graph.train_nodes, max_degree,\n rng)\n senders = []\n receivers = []\n for u in edges:\n for v in edges[u]:\n senders.append(u)\n receivers.append(v)\n\n graph.senders = senders\n graph.receivers = receivers\n return graph",
"def test_node_sampling(weighted_graph_config_fixture):\n w_config = weighted_graph_config_fixture\n\n # Node 5 to node 4 has zero weight (zero transition probability)\n # Node 4 to node 5 has ten weight (high transition probability)\n edges = pd.DataFrame({'source_content_id': [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n 'destination_content_id': [5, 1, 0, 3, 4, 1, 2, 1, 3, 5, 3, 4],\n 'weight': [1, 2, 3, 4, 1, 2, 3, 4, 1, 10, 5, 0]}\n )\n wm = N2VModel()\n\n wm.create_graph(edges, w_config['weighted_graph'])\n\n wm.generate_walks(**w_config)\n\n wm.fit_model(**w_config, callbacks=EpochLogger())\n\n n_nodes = len(set(edges.source_content_id))\n n_transitions = n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks']\n\n res = np.array([np.array(list(zip(x, x[1:]))).ravel() for x in wm.node2vec.walks])\n walks = np.reshape(res, (n_transitions, 2))\n\n pairs = pd.DataFrame({'state1': walks[:, 0], 'state2': walks[:, 1]})\n counts = pairs.groupby('state1')['state2'].value_counts().unstack()\n counts = counts.replace(np.nan, 0)\n assert pairs.shape == (n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks'], 2)\n assert counts.iloc[5][4] == 0\n assert counts.iloc[4][5] != 0\n assert len(set(edges['source_content_id']).union(\n set(edges['destination_content_id']))) == len(wm.model.wv.vocab.keys())",
"def BiasedTree(N,alpha=0.): \n free = sample(range(N),N)\n nodes = [free.pop()]\n links = []\n K = np.zeros((N,))\n K[nodes[0]]=1.\n while free:\n newn = free.pop()\n K[newn]=1.\n p = K[np.array(nodes)]**alpha\n p = p/np.sum(p)\n mother = np.random.choice(nodes,p=p)\n K[mother] += 1.\n nodes.append(newn)\n links.append((mother,newn))\n return nx.DiGraph(links)",
"def sample_edges(probs):\n n = len(probs)\n probs = _dist.squareform(probs, force='tovector', checks=False)\n coins = _np.random.sample(probs.shape)\n edges = coins <= probs\n edges = _dist.squareform(edges)\n edges[_np.diag_indices_from(edges)] = 0\n return edges.astype(int)",
"def add_random_weights(G):\n for (_,_,d) in G.edges(data=True):\n d[\"weight\"] = random.random()",
"def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators",
"def sample_edge_uniform(adj_list, degrees, n_triplets, sample_size):\n all_edges = np.arange(n_triplets)\n return np.random.choice(all_edges, sample_size, replace=False)",
"def random_graph(N, deg_sampler, directed=True,\n parallel_edges=False, self_loops=False, block_membership=None,\n block_type=\"int\", degree_block=False,\n random=True, verbose=False, **kwargs):\n\n g = Graph()\n\n if (type(block_membership) is types.FunctionType or\n type(block_membership) is types.LambdaType):\n btype = block_type\n bm = []\n if len(inspect.getargspec(block_membership)[0]) == 0:\n for i in range(N):\n bm.append(block_membership())\n else:\n for i in range(N):\n bm.append(block_membership(i))\n block_membership = bm\n elif block_membership is not None:\n btype = _gt_type(block_membership[0])\n\n if len(inspect.getargspec(deg_sampler)[0]) > 0:\n if block_membership is not None:\n sampler = lambda i: deg_sampler(i, block_membership[i])\n else:\n sampler = deg_sampler\n else:\n sampler = lambda i: deg_sampler()\n\n libgraph_tool_generation.gen_graph(g._Graph__graph, N, sampler,\n not parallel_edges,\n not self_loops, not directed,\n _get_rng(), verbose, True)\n g.set_directed(directed)\n\n if degree_block:\n if btype in [\"object\", \"string\"] or \"vector\" in btype:\n btype = \"object\"\n elif btype in [\"int\", \"int32_t\", \"bool\"]:\n btype = \"vector<int32_t>\"\n elif btype in [\"long\", \"int64_t\"]:\n btype = \"vector<int64_t>\"\n elif btype in [\"double\"]:\n btype = \"vector<double>\"\n elif btype in [\"long double\"]:\n btype = \"vector<long double>\"\n\n if block_membership is not None:\n bm = g.new_vertex_property(btype)\n if btype in [\"object\", \"string\"] or \"vector\" in btype:\n for v in g.vertices():\n if not degree_block:\n bm[v] = block_membership[int(v)]\n else:\n if g.is_directed():\n bm[v] = (block_membership[int(v)], v.in_degree(),\n v.out_degree())\n else:\n bm[v] = (block_membership[int(v)], v.out_degree())\n else:\n try:\n bm.a = block_membership\n except ValueError:\n bm = g.new_vertex_property(\"object\")\n for v in g.vertices():\n bm[v] = block_membership[int(v)]\n else:\n bm = None\n\n if random:\n g.set_fast_edge_removal(True)\n random_rewire(g, parallel_edges=parallel_edges,\n self_loops=self_loops, verbose=verbose,\n block_membership=bm, **kwargs)\n g.set_fast_edge_removal(False)\n\n if bm is None:\n return g\n else:\n return g, bm",
"def random_graph(n, m):\n G = Graph()\n for v in range(n):\n G.add_vertex(v)\n \n while G.num_edges() < m:\n G.add_edge(random.sample(range(n), 2))\n\n return G",
"def sample(self, graph: nx.classes.graph.Graph) -> nx.classes.graph.Graph:\n self._nodes = set()\n self._edges = set()\n self._check_graph(graph)\n self._graph = graph\n self._create_initial_seed_set()\n while len(self._nodes) < self.number_of_nodes:\n self._reweight()\n self._do_update()\n new_graph = nx.from_edgelist(self._edges)\n return new_graph",
"def sample_neighbors_biased(\n g,\n nodes,\n fanout,\n bias,\n edge_dir=\"in\",\n tag_offset_name=\"_TAG_OFFSET\",\n replace=False,\n copy_ndata=True,\n copy_edata=True,\n output_device=None,\n):\n if isinstance(nodes, list):\n nodes = F.tensor(nodes)\n if isinstance(bias, list):\n bias = F.tensor(bias)\n device = utils.context_of(nodes)\n\n nodes_array = F.to_dgl_nd(nodes)\n bias_array = F.to_dgl_nd(bias)\n if edge_dir == \"in\":\n tag_offset_array = F.to_dgl_nd(g.dstdata[tag_offset_name])\n elif edge_dir == \"out\":\n tag_offset_array = F.to_dgl_nd(g.srcdata[tag_offset_name])\n else:\n raise DGLError(\"edge_dir can only be 'in' or 'out'\")\n\n subgidx = _CAPI_DGLSampleNeighborsBiased(\n g._graph,\n nodes_array,\n fanout,\n bias_array,\n tag_offset_array,\n edge_dir,\n replace,\n )\n induced_edges = subgidx.induced_edges\n ret = DGLGraph(subgidx.graph, g.ntypes, g.etypes)\n\n if copy_ndata:\n node_frames = utils.extract_node_subframes(g, device)\n utils.set_new_frames(ret, node_frames=node_frames)\n\n if copy_edata:\n edge_frames = utils.extract_edge_subframes(g, induced_edges)\n utils.set_new_frames(ret, edge_frames=edge_frames)\n\n ret.edata[EID] = induced_edges[0]\n return ret if output_device is None else ret.to(output_device)",
"def generate_bag(pt,):\n global sel, mu, br, num_nodes, activity_wt\n\n scipy.random.seed()\n bag = [pt]\n points = [pt]\n\n # generate tree with required number of nodes\n while True:\n # process the current node\n curr_pt = points.pop(0)\n curr_pt_id = curr_pt[0]\n curr_pt_sp_id = curr_pt[1]\n\n # generate left descendant with a certain probability\n # the value of zero means to always generate\n if np.random.random() > 0:\n left_descendant = generate_descendant_activity(curr_pt[2:-1])\n left_part = np.append(curr_pt_id, tree_lc[curr_pt_sp_id])\n left_part = np.append(left_part, np.append(left_descendant,\n get_activity(modify_pt(left_descendant), activity_wt)\n )\n )\n\n # generate right descendant with a certain probability\n # the value of zero means to always generate\n if np.random.random() > 0:\n right_descendant = generate_descendant_activity(curr_pt[2:-1])\n right_part = np.append(curr_pt_id, tree_rc[curr_pt_sp_id])\n right_part = np.append(right_part, np.append(right_descendant,\n get_activity(modify_pt(right_descendant), activity_wt)\n )\n )\n\n points.append(left_part)\n points.append(right_part)\n bag.append(left_part)\n bag.append(right_part)\n\n if len(bag) >= num_nodes:\n break\n\n return bag[:num_nodes]",
"def er_random_graph_generator(n, p, ng, seed, w_base, w_top):\n\n f_er_graph_list = []\n for i in range(0, ng):\n f_g = nx.erdos_renyi_graph(n, p, seed + i, directed=False)\n for (u, v, w) in f_g.edges(data=True):\n w['weight'] = random.randint(w_base, w_top)\n f_er_graph_list.append(f_g)\n return f_er_graph_list",
"def bfsSample(G, source=None, k = 50):\n\twarn(\"networkit.sampling.bfsSample is deprecated, will be removed in future updates.\")\n\tif not source:\n\t\tsource = GraphTools.randomNode(G)\n\tn = G.numberOfNodes()\n\tvisited = [False]*n\n\tQ = [source]\n\tclosest = set([source])\n\tglobal found\n\tfound = 0\n\twhile len(Q) > 0 and found < k:\n\t\tu = Q.pop(0)\n\t\tdef enqueue(u,v,weight, eid):\n\t\t\tglobal found\n\t\t\tif not visited[v] and found < k:\n\t\t\t\tfound += 1\n\t\t\t\tvisited[v] = True\n\t\t\t\tQ.append(v)\n\t\t\t\tclosest.add(v)\n\t\tG.forEdgesOf(u, enqueue)\n\tprint(\"found {0} nodes\".format(len(closest)))\n\tG1 = GraphTools.subgraphFromNodes(G, closest)\n\treturn G1",
"def decimated_graph(g, p, q, seed=0):\n random.seed(seed)\n nodes = g.nodes()\n n = len(nodes)\n n_nodes = set(random.sample(nodes, int(p * n)))\n n_edges = [(a, b) for a, b in g.edges() if a in n_nodes and b in n_nodes]\n\n n_edges = random.sample(n_edges, int(len(n_edges) * q))\n\n name = \"decimated(%s, %s, %s, %s)\" % (g.name if g.name else hash(g), p, q,\n seed)\n\n return nx.Graph(n_edges, name=name)",
"def BFTM_(adj_list,labels):\n G_prime = nx.Graph()\n num_clusters = list(np.unique(labels))\n clusters = {i:[] for i in num_clusters}\n hood = {n.id:[i for i in num_clusters if i != labels[n.id]] for n in adj_list}\n \n #Add nodes to clusters\n for idx,n in enumerate(adj_list):\n clusters[labels[idx]].append(n.id)\n \n root_cluster = random.choice(num_clusters)\n root_id = random.choice(list(clusters[root_cluster]))\n queue = [adj_list[root_id]]\n clusters[labels[root_id]].remove(root_id)\n \n \n #BFTM\n while len(queue) > 0:\n node = queue.pop(0)\n for c_id in hood[node.id]:\n if len(clusters[c_id]) > 0:\n sample_id = random.choice(clusters[c_id])\n clusters[labels[sample_id]].remove(sample_id)\n queue.append(adj_list[sample_id])\n hood[sample_id].remove(labels[node.id])\n G_prime.add_edge(node,adj_list[sample_id])\n hood[node.id] = None\n #Handle leftover nodes\n if len(queue) == 0:\n remaining = [c for i,c in clusters.items() if len(c) > 0]\n for rem_cluster in remaining:\n for n in rem_cluster:\n added = False\n while not added:\n rand_n = random.choice(list(G_prime.nodes))\n if labels[rand_n.id] != labels[n.id]:\n G_prime.add_edge(n,rand_n)\n added = True\n \n \n #Cliqify\n for node in list(G_prime.nodes):\n if G_prime.degree(node) < len(num_clusters) - 1:\n for _1_hop in list(G_prime.neighbors(node)):\n for _2_hop in list(G_prime.neighbors(_1_hop)):\n if _2_hop != node and G_prime.degree(_2_hop) < len(num_clusters) - 1:\n G_prime.add_edge(node,_2_hop)\n \n return G_prime",
"def random_cstree(value_dict, order, ps, Ms):\n # TODO Look into moving contextn1,contextn2 inside the if merge block\n assert len(order)==len(list(value_dict.keys()))\n dag = generate_dag(len(order), 1)\n dag = nx.relabel_nodes(dag, lambda i: order[i-1])\n tree = dag_model(value_dict, dag, order)\n\n p = len(order)\n for level in range(1, p):\n #print(\"level\", level)\n # Generate a context randomly\n\n for _ in range(Ms[level-1]):\n current_level_nodes = [n for n in tree.nodes\n if nx.shortest_path_length(tree, \"Root\", n) == level]\n\n # Choose 2 random nodes\n random_node_pair = random.sample(current_level_nodes, 2)\n r_node1, r_node2 = random_node_pair[0], random_node_pair[1]\n context_n1 = tree.nodes[r_node1].get(\"context\", r_node1)\n context_n2 = tree.nodes[r_node2].get(\"context\", r_node2)\n\n # Merge their stages with probability ps[level-1]\n merge = True if np.random.uniform() < ps[level-1] else False\n\n if merge:\n common_subcontext = set(context_n1).intersection(set(context_n2))\n\n new_nodes = [n for n in current_level_nodes\n if common_subcontext.issubset(set(n))]\n\n # Existing contexts of nodes above if they exist\n existing_contexts = nonsingleton_stages(tree, new_nodes)\n\n if existing_contexts!=set():\n # If such contexts exist, the common context is the total intersection\n common_subcontext = common_subcontext.intersection(*list(existing_contexts))\n\n new_nodes = [n for n in current_level_nodes\n if common_subcontext.issubset(set(n))]\n\n for node in new_nodes:\n tree.nodes[node][\"context\"]=frozenset(common_subcontext)\n\n # Generate distribution with separate function\n tree_distr = tree_distribution(value_dict, tree, order)\n return tree, tree_distr",
"def bn_sample_joint_from_prior(beta, dag, node_ordering, n, ess, batch_size=1, random_state=None):\n d = dag.shape[1]\n\n repeated_list = [[True, False]] * d\n oc = np.array(list(itertools.product(*repeated_list)))\n\n pmat = np.ones((oc.shape[0], beta.shape[0]))\n k = 0\n for i in np.arange(d):\n node = node_ordering[i]\n par = np.where(dag[:, node] == 1)[0]\n\n if par.size != 0:\n repeated_list = [[True, False]] * (par.size)\n par_oc = np.array(list(itertools.product(*repeated_list)))\n alpha = ess / (oc.shape[0] * 2)\n\n for l in np.arange(par_oc.shape[0]):\n p0 = beta[:, k]\n k += 1\n ind = np.equal(oc[:, par], par_oc[l, :]).all(axis=1)\n\n ind0 = np.logical_and(ind.ravel(), (oc[:, node] == 0).ravel())\n pmat[ind0, :] = pmat[ind0, :] * np.tile(p0.reshape(1, -1),\n (np.sum(ind0), 1))\n\n ind1 = np.logical_and(ind.ravel(), (oc[:, node] == 1).ravel())\n pmat[ind1, :] = pmat[ind1, :] * np.tile(1 - p0.reshape(1, -1),\n (np.sum(ind1), 1))\n else:\n alpha = ess / 2\n p0 = beta[:, k]\n k += 1\n\n ind0 = (oc[:, node] == 0)\n pmat[ind0, :] = pmat[ind0, :] * np.tile(p0.reshape(1, -1),\n (np.sum(ind0), 1))\n\n ind1 = (oc[:, node] == 1)\n pmat[ind1, :] = pmat[ind1, :] * np.tile(1 - p0.reshape(1, -1),\n (np.sum(ind0), 1))\n\n return pmat",
"def generate_random_graph(num_nodes):\n root = Node()\n nodes = set([root])\n edge_count = 0\n num_edges = int(math.log(num_nodes, 1.7)) * num_nodes\n\n for i in range(1, num_nodes):\n node = Node()\n node.edges.add(random.sample(nodes, 1)[0])\n nodes.add(node)\n edge_count += 1\n\n # Generate edges until \n for j in range(edge_count, num_edges):\n tail, head = random.sample(nodes, 2)\n while head in tail.edges:\n tail, head = random.sample(nodes, 2)\n tail.edges.add(head)\n edge_count += 1\n \n # Convert our graph to CSR representation by first creating an adjacency\n # matrix and then transforming it to a CSR\n\n # Generating adjacency matrix\n adjacency_matrix = [[0] * num_nodes for i in range(num_nodes)]\n sums = defaultdict(int)\n stack = [root]\n visited = set()\n while stack:\n curr = stack.pop()\n if curr not in visited:\n visited.add(curr)\n for node in curr.edges:\n stack.append(node)\n adjacency_matrix[curr.id][node.id] = 1.0\n sums[curr.id] += 1\n\n # Adjacency matrix -> CSR\n offset = 0\n csr = [[] for i in range(3)]\n nonzeros = np.nonzero(adjacency_matrix)\n last_row = -1\n for i in range(len(nonzeros[0])):\n row = nonzeros[0][i]\n col = nonzeros[1][i]\n outdegree = sums[row]\n if last_row != row:\n csr[1].append(offset)\n csr[0].append(adjacency_matrix[row][col] / outdegree)\n csr[2].append(col)\n offset += 1\n last_row = row\n csr[1].append(offset)\n\n # Write to txt and pickle\n with open(generate_filepath_txt(num_nodes), \"w\") as fp:\n fp.write(' '.join(str(i) for i in csr[0]) + '\\n')\n fp.write(' '.join(str(i) for i in csr[1]) + '\\n')\n fp.write(' '.join(str(i) for i in csr[2]))\n with open(generate_filepath_pickle(num_nodes), \"wb\") as fp:\n pickle.dump(csr, fp)",
"def sample_edge_neighborhood(adj_list, degrees, n_triplets, sample_size):\n edges = np.zeros((sample_size), dtype=np.int32)\n\n # initialize\n sample_counts = np.array([d for d in degrees])\n picked = np.array([False for _ in range(n_triplets)])\n seen = np.array([False for _ in degrees])\n\n for i in range(0, sample_size):\n weights = sample_counts * seen\n\n if np.sum(weights) == 0:\n weights = np.ones_like(weights)\n weights[np.where(sample_counts == 0)] = 0\n\n probabilities = (weights) / np.sum(weights)\n chosen_vertex = np.random.choice(np.arange(degrees.shape[0]),\n p=probabilities)\n\n chosen_adj_list = adj_list[chosen_vertex]\n seen[chosen_vertex] = True\n\n chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))\n chosen_edge = chosen_adj_list[chosen_edge]\n edge_number = chosen_edge[0]\n\n while picked[edge_number]:\n chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))\n chosen_edge = chosen_adj_list[chosen_edge]\n edge_number = chosen_edge[0]\n\n edges[i] = edge_number\n other_vertex = chosen_edge[1]\n picked[edge_number] = True\n sample_counts[chosen_vertex] -= 1\n sample_counts[other_vertex] -= 1\n seen[other_vertex] = True\n\n return edges",
"def randomize_graph(graph, nodePositions, mask, planar=0, iterations=1000):\n nodeNumber = graph.number_of_nodes()\n edgeNumber = graph.number_of_edges()\n randomizedGraph = nx.empty_graph(nodeNumber, nx.MultiGraph())\n edgeLengths = np.array([property['edist'] for node1, node2, property in graph.edges(data=True)])\n bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 9999]\n binNumber = len(bins) - 1\n edgeBins = np.zeros(edgeNumber).astype('int')\n for index, (bin1, bin2) in enumerate(zip(bins[:-1], bins[1:])):\n edgesInBin = (edgeLengths >= bin1) * (edgeLengths < bin2)\n edgeBins[edgesInBin] = index\n edgeWeights = np.array([property['weight'] for node1, node2, property in graph.edges(data=True)])\n edgeCapacities = np.array([property['capa'] for node1, node2, property in graph.edges(data=True)])\n redoRandomization = 1\n iterationNumber = 0\n while (redoRandomization == 1 and iterationNumber < iterations):\n iterationNumber += 1\n nodePositionsRandom = cell_sample(mask, nodeNumber)[:, ::-1].astype('int')\n distanceMatrix = sp.spatial.distance_matrix(nodePositionsRandom, nodePositionsRandom)\n edgeBinsRandom = np.zeros((nodeNumber, nodeNumber)).astype('int')\n for index, (bin1, bin2) in enumerate(zip(bins[:-1], bins[1:])):\n edgesInBin = (distanceMatrix >= bin1) * (distanceMatrix < bin2)\n edgeBinsRandom[edgesInBin] = index\n edgeBinsRandom[np.tri(nodeNumber) > 0] =- 9999\n redoRandomization = 1 * np.max([(edgeBinsRandom == bins).sum() < (edgeBins == bins).sum() for bins in range(binNumber)])\n if (iterationNumber < iterations):\n sortBins = np.argsort(edgeLengths)[::-1]\n edgeBinsSort = edgeBins[sortBins]\n edgeWeightsSort = edgeWeights[sortBins]\n edgeCapacitiesSort = edgeCapacities[sortBins]\n addedEdges = []\n for edge in range(edgeNumber):\n candidateNodes = np.where(edgeBinsRandom == edgeBinsSort[edge])\n candidateNumber = len(candidateNodes[0])\n edgeCrossings = 9999\n selectedCandidates = random.sample(range(candidateNumber), min(50, candidateNumber))\n for candidate in selectedCandidates:\n node1 = candidateNodes[0][candidate]\n node2 = candidateNodes[1][candidate]\n edgeBetweenNodes = np.array([[nodePositionsRandom[node1][0], nodePositionsRandom[node2][0]], [nodePositionsRandom[node1][1], nodePositions[node2][1]]]).T\n crossingsOfEdges = planar * multi_line_intersect(np.array(edgeBetweenNodes), np.array(addedEdges)).sum()\n if (crossingsOfEdges < edgeCrossings and edgeBinsRandom[node1, node2] >= 0):\n edgeCrossings = crossingsOfEdges\n selectedEdge = edgeBetweenNodes\n selectedNode1, selectedNode2 = node1, node2\n addedEdges.append(selectedEdge)\n nodeDistanceRandom = distanceMatrix[selectedNode1, selectedNode2]\n filamentLengthRandom = 1.0 * np.ceil(nodeDistanceRandom)\n edgeWeightRandom = edgeWeightsSort[edge]\n edgeCapacityRandom = edgeCapacitiesSort[edge]\n edgeLengthRandom = 1.0 * filamentLengthRandom / edgeWeightRandom\n edgeConnectivityRandom = 0\n edgeJumpRandom = 0\n edgeMultiplicity = 1\n randomizedGraph.add_edge(selectedNode1, selectedNode2, edist=nodeDistanceRandom, fdist=filamentLengthRandom, weight=edgeWeightRandom, capa=edgeCapacityRandom, lgth=edgeLengthRandom, conn=edgeConnectivityRandom, jump=edgeJumpRandom, multi=edgeMultiplicity)\n edgeBinsRandom[selectedNode1, selectedNode2] =- 9999\n edgeBinsRandom[selectedNode2, selectedNode1] =- 9999\n else:\n edgeProperties = np.array([property for node1, node2, property in graph.edges(data=True)])\n random.shuffle(edgeProperties)\n randomizedGraph = graph.copy()\n for 
index, (node1, node2, properties) in enumerate(randomizedGraph.edges(data=True)):\n for key in properties.keys():\n properties[key] = edgeProperties[index][key]\n nodePositionsRandom = nodePositions\n return(randomizedGraph, nodePositionsRandom)",
"def rand_flip_graph(graph, edge):\n return rand_zero_or_one(0.5)\n # return rand_zero_or_one(edge_prob(graph, edge))",
"def random_by_nodes(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:\n if percentage is None:\n percentage = 0.9\n assert 0 < percentage <= 1\n\n nodes = graph.nodes()\n n = int(len(nodes) * percentage)\n\n subnodes = random.sample(nodes, n)\n\n result = graph.subgraph(subnodes)\n return result",
"def test_randomly_select_node_2(self):\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = {c}\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2)\n\n self.assertNotIn(c, node_counter)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2)",
"def get_random_depth_sample(n=8, depths=list(range(2,26,2)), num_samples=100):\n\n def get_states(start):\n frontier = [start]\n frontier_set = {start}\n explored = set()\n\n states = [False for _ in range(len(depths))]\n while not all(states):\n node = frontier.pop(0)\n frontier_set.remove(node)\n explored.add(node)\n\n children = node.get_children()\n\n # It's necessary to shuffle children to get a truly random sample; otherwise, the first child (always\n # produced from the parent by the same action) produced at a certain depth will always be selected,\n # and children produced by other actions will never be selected\n shuffle(children)\n\n for child in children:\n if child not in frontier_set and child not in explored:\n frontier_set.add(child)\n frontier.append(child)\n child.path_cost = node.path_cost+1\n index = depths.index(child.path_cost) if child.path_cost in depths else None\n if index is not None and not states[index]:\n states[index] = {'start': start.sequence, 'end': child.sequence}\n\n return states\n\n depth_sample = [[] for depth in range(len(depths))]\n\n for _ in range(num_samples):\n start = list(range(1,n+2))\n shuffle(start)\n start = PuzzleState(start, path_cost=0)\n\n states = get_states(start)\n print('\\rSet ' + str(_+1) + ' of ' + str(num_samples) + ' complete', end='', flush=True)\n list(map(list.append, depth_sample, states))\n\n return depth_sample",
"def evolve(pop_perf, breed_method):\n # Sort on the scores.\n pop = [x[1] for x in sorted(pop_perf, key=lambda x: x[0], reverse=True)]\n\n # keep the best 25%\n retain_length = 2 #int(np.ceil(len(pop)*.25))\n\n # The parents are every network we want to keep.\n parents = pop[:retain_length]\n\n # Randomly mutate the networks we're keeping, and add these\n # This preserves the already good networks, so we don't lose out.\n mutated = []\n for index, individual in enumerate(parents):\n mutated.append(mutate(parents[index]))\n parents.extend(mutated)\n\n # For those we aren't keeping, randomly add 10% of population to increase variance. Mutate them individually, then add. \n # Mutation because we already know they are bad, should try something else. Something like that.\n num_poor = 2#int(math.ceil(len(pop)*.1))\n poor_keeping = random.sample(pop[retain_length:], num_poor)\n for poor_sch in poor_keeping:\n parents.append(mutate(poor_sch))\n\n # Now find out how many spots we have left to fill. (how many children to make, about 40% of full pop)\n parents_length = len(parents)\n desired_length = len(pop) - parents_length\n children = []\n\n\n # Add children, which are bred from two remaining networks.\n while len(children) < desired_length:\n\n # Get a random mom and dad.\n male = random.randint(0, parents_length-1)\n female = random.randint(0, parents_length-1)\n\n # Assuming they aren't the same network...\n if male != female:\n male = parents[male]\n female = parents[female]\n\n # pick breeding method:\n if random.random() > .5:\n way = 'mean'\n else:\n way = 'random'\n \n # Breed them.\n babies = breed_method(male, female, way)\n\n # children.append(babies[desired_length:])\n # Add the children one at a time.\n for baby in babies:\n # Don't grow larger than desired length.\n if len(children) < desired_length:\n children.append(baby)\n parents.extend(children)\n return parents",
"def generate_sampled_graph_and_labels(triplets, sample_size, split_size,\n num_rels, adj_list, degrees,\n negative_rate, sampler=\"uniform\"):\n # perform edge neighbor sampling\n if sampler == \"uniform\":\n edges = sample_edge_uniform(adj_list, degrees, len(triplets), sample_size)\n elif sampler == \"neighbor\":\n edges = sample_edge_neighborhood(adj_list, degrees, len(triplets), sample_size)\n else:\n raise ValueError(\"Sampler type must be either 'uniform' or 'neighbor'.\")\n\n # relabel nodes to have consecutive node ids\n edges = triplets[edges]\n src, rel, dst = np.array(edges).transpose()\n uniq_v, edges = np.unique((src, dst), return_inverse=True)\n\n src, dst = np.reshape(edges, (2, -1))\n relabeled_edges = np.stack((src, rel, dst)).transpose()\n\n # negative sampling\n samples, labels = negative_sampling(relabeled_edges, len(uniq_v),\n negative_rate)\n\n # further split graph, only half of the edges will be used as graph\n # structure, while the rest half is used as unseen positive samples\n split_size = int(sample_size * split_size)\n graph_split_ids = np.random.choice(np.arange(sample_size),\n size=split_size, replace=False)\n src = src[graph_split_ids]\n dst = dst[graph_split_ids]\n rel = rel[graph_split_ids]\n\n # build DGL graph\n g, rel, norm = build_graph_from_triplets(len(uniq_v), num_rels,\n (src, rel, dst))\n return g, uniq_v, rel, norm, samples, labels",
"def random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:\n if percentage is None:\n percentage = 0.9\n assert 0 < percentage <= 1\n\n number_edges = int(graph.number_of_edges() * percentage)\n rv = BELGraph()\n rv.add_edges_from(random.sample(graph.edges(keys=True, data=True), number_edges))\n return rv",
"def get_subsample_of_nodes(g, sampl=1):\n return sample(g.nodes(), int(len(g.nodes())*sampl))"
]
| [
"0.6734483",
"0.6609522",
"0.6599776",
"0.65759814",
"0.64373255",
"0.6393696",
"0.6348071",
"0.6322246",
"0.62644666",
"0.62157667",
"0.62151515",
"0.60374993",
"0.59690386",
"0.59388816",
"0.5910111",
"0.5908384",
"0.58963275",
"0.5890668",
"0.5856513",
"0.5833557",
"0.58108836",
"0.5793358",
"0.57758206",
"0.5756315",
"0.57351196",
"0.5732072",
"0.5725866",
"0.57252693",
"0.5715239",
"0.5711559"
]
| 0.7124525 | 0 |
Tests GET method on google authenticator | def test_read_ga(self):
url = reverse('admin_google_authenticator')
data = {
}
self.client.force_authenticate(user=self.admin)
response = self.client.get(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_0040_registration_get(self):\n response = self.fetch(\n '/registration', method=\"GET\", follow_redirects=False\n )\n self.assertEqual(response.code, 200)",
"def test_get_main_route_auth():\n set_token(client, \"user0011\")\n\n response = client.get(url)\n assert response.status_code == 202",
"def test_GET(self):\n if not self.url:\n return\n response = self.client.get(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)",
"def test_get_authenticated(self):\n self.verify_get_response(self.client.get(STATUS_PATH))",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_get_without_oauth(self):\n self.client = trovebox.Trovebox(host=self.test_host)\n self._register_uri(httpretty.GET)\n response = self.client.get(self.test_endpoint)\n self.assertNotIn(\"authorization\", self._last_request().headers)\n self.assertEqual(response, self.test_data)",
"def test_doGet(self) -> None:\n\n status_code = apicall.doGet(URL, self._browserheader)\n print(\"in do get:\", status_code)\n assert status_code == API_SUCCESS",
"def test_login_get(self):\n resp = self.client.get(reverse('login'))\n assert resp.status_code == 405",
"def test_activate_unauthenticated_get(client):\n response = client.get(\"/auth/activate\")\n assert response.status_code == HTTPStatus.METHOD_NOT_ALLOWED",
"def test_get(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_get(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_0010_login_get(self):\n response = self.fetch('/login', method=\"GET\", follow_redirects=False)\n self.assertEqual(response.code, 200)",
"def test_get(self):\n expected_response = {\n 'id': 1111,\n 'first_name': 'Jhon',\n 'last_name': 'Doe',\n 'user_id': 1001,\n 'telegram_id': None\n }\n\n response = self.client.get(self.url)\n self.assertJSONEqual(json.dumps(expected_response), json.loads(response.content))\n self.assertEqual(response.status_code, 200)",
"def test_get_unauthenticated(self):\n del self.client.request_kwargs['auth']\n self.verify_get_response(self.client.get(STATUS_PATH))",
"def test_get(self):\n self.assertEqual(200, self.response.status_code)",
"def test_get(self):\n self.assertEqual(200, self.response.status_code)",
"def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)",
"def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')",
"def test_client_verification_retrieve(self):\n pass",
"def test_get(self):\n pass",
"def test_get_oauth2_discovery(self):\n response = self.client.get(reverse('oauth_authorization_server'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"userinfo_endpoint\")",
"def google_verify(request):\n return {}",
"def http_method_get():\n return 'GET'",
"def test_profile_api_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_get(self):\n url, port = self.server.address\n\n #couple of basic GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?",
"def test_signup_GET(self):\r\n\r\n with self.client:\r\n response = self.client.get('/signup')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'Create a free account', response.data)",
"def test_get_with_parameters(self):\n self._register_uri(httpretty.GET)\n response = self.client.get(self.test_endpoint,\n foo=\"bar\", spam=\"eggs\")\n self.assertIn(\"OAuth\", self._last_request().headers[\"authorization\"])\n self.assertEqual(self._last_request().querystring[\"foo\"], [\"bar\"])\n self.assertEqual(self._last_request().querystring[\"spam\"], [\"eggs\"])\n self.assertEqual(response, self.test_data)\n self.assertEqual(self.client.last_url, self.test_uri)\n self.assertEqual(self.client.last_params, {\"foo\": b\"bar\",\n \"spam\": b\"eggs\"})\n self.assertEqual(self.client.last_response.json(), self.test_data)"
]
| [
"0.692226",
"0.6907537",
"0.69011676",
"0.684241",
"0.68108356",
"0.6698029",
"0.6575124",
"0.6575124",
"0.6561964",
"0.6536934",
"0.65270597",
"0.6493965",
"0.6480874",
"0.6480874",
"0.6438329",
"0.63903564",
"0.63852197",
"0.63834625",
"0.63834625",
"0.63336784",
"0.6310726",
"0.6299449",
"0.6265377",
"0.62653446",
"0.62613904",
"0.6256544",
"0.62541854",
"0.6240502",
"0.6233012",
"0.6227091"
]
| 0.75863326 | 0 |
Tests PUT method on google authenticator | def test_update_ga(self):
url = reverse('admin_google_authenticator')
data = {
}
self.client.force_authenticate(user=self.admin)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_PUT(self):\n if not self.url:\n return\n response = self.client.put(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_put_method(self):\n self.getPage('/blah', method='PUT')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"blah\"}')",
"def test_client_can_do_put_request(self):\n response = self.httpbin_4.test_requests_put_method()\n self.assertEqual(response.request.method, 'PUT')\n self.assertEqual(response.status_code, 200)",
"def test_put(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.PUT, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.put(rest_url)",
"def test_kyc_put_request(self):\n pass",
"def test_api_user_put(self):\n pass",
"def test_put(self):\n url, port = self.server.address\n\n #couple of basic POSTs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n r = self.client.put(\"http://{0}:{1}/400?foo=bar\".format(url, port))\n self.assertEqual(400, r.status_code)",
"def test_put_success(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 200)",
"def http_method_put():\n return 'PUT'",
"def simulate_put(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PUT', path, **kwargs)",
"def test_PUT4(self):\n payload = {\n \"make\": \"Nissan\",\n \"model\": \"Skyline\",\n \"year\": 1999,\n \"price\": 2200\n }\n r = requests.put(self.address + \"/loremipsum/42\", json=payload)\n self.assertEqual(r.status_code, 400)",
"def test_put_unauthenticated(self):\n\n url = reverse('file')\n\n data = {}\n\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def _put(self, url, data, extra_headers=None):\n headers = {'X-Requested-By': 'Unit Tests'}\n headers.update(extra_headers)\n return self.client.put(\n url,\n content_type='application/json',\n data=utils.as_json(data),\n headers=headers,\n )",
"def put(self, request, *args, **kwargs):\n verify_secure(request)\n return super().put(request, args, kwargs)",
"def put(self, request, *args, **kwargs):\n verify_secure(request)\n return super().put(request, args, kwargs)",
"def test_validate_put_new(client):\n response = client.put(\n '/user/2',\n data=json.dumps({\n 'name': 'Elissa Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def test_put_no_profile(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n response = self.second_client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)",
"def do_PUT(self,):\n self.http_method = 'PUT'\n # Nothing to do for now.\n pass",
"def test_kyc_put_request_legal(self):\n pass",
"def test_update(self):\n url_register = reverse('auth_register')\n resp = self.client.post(url_register, {\n \"username\": \"user\",\n \"password\": \"lol1lol1\",\n \"password2\": \"lol1lol1\",\n \"email\": \"[email protected]\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"bio\": \"\"\n })\n print(resp.headers[\"Location\"])\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n url_auth = reverse('token_obtain_pair')\n resp = self.client.post(url_auth, {'username':'user', 'password':'lol1lol1'}, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n token = resp.data['access']\n\n url_upd = reverse('auth_update_profile', kwargs={'pk': 2})\n\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)\n resp = client.patch(url_upd, {\n \"username\": \"user3\",\n \"email\": \"[email protected]\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"image\": \"\",\n \"bio\": \"\",\n \"city\": \"\",\n \"phone\": \"\"\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get().username, 'user3')",
"def test_validate_put_existing(client):\n response = client.put(\n '/user/1',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def _put(self, *args, **kwargs):\n return self._request('put', *args, **kwargs)",
"async def simulate_put(self, path='/', **kwargs) -> _ResultBase:\n return await self.simulate_request('PUT', path, **kwargs)",
"def put(self, *args, **kwargs):\n self.request(\"put\", *args, **kwargs)",
"def test_post(self):\n self.response = self.client.put(self.url, dict(name='São Paulo'))\n self.assertEqual(\n status.HTTP_405_METHOD_NOT_ALLOWED, self.response.status_code)",
"def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_put_one(self):\n response = self.client.put('/api/v1/parcels/100')\n self.assertEqual(response.status_code, 200)",
"def simulate_put(self, path='/', **kwargs) -> _ResultBase:\n return self.simulate_request('PUT', path, **kwargs)",
"def test_put(self):\n self.seed_static_data()\n\n params = {\n 'id': 2,\n 'event_id': 1,\n 'tag_type': 'REGISTRATION',\n 'name': {\n 'en': 'Renamed English Name', # Rename\n 'zu': 'Zulu Name'\n },\n 'description': {\n 'en': 'Renamed English Description',\n 'zu': 'Zulu Description'\n },\n 'active': True\n }\n\n response = self.app.put(\n '/api/v1/tag', \n headers=self.user1_headers, \n data=json.dumps(params),\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data={'id': 2, 'event_id': 1, 'language': 'en'})\n data = json.loads(response.data)\n\n self.assertEqual(data['id'], 2)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'REGISTRATION')\n self.assertDictEqual(data['name'], {\n 'en': 'Renamed English Name',\n 'zu': 'Zulu Name'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'Renamed English Description',\n 'zu': 'Zulu Description'\n })",
"def simulate_put(self, path='/', **kwargs):\n return self.simulate_request('PUT', path, **kwargs)"
]
| [
"0.75060594",
"0.74279714",
"0.7279392",
"0.7239791",
"0.71154946",
"0.7027044",
"0.6975747",
"0.6921709",
"0.68776083",
"0.68717974",
"0.6840171",
"0.6779103",
"0.6754017",
"0.6685355",
"0.6685355",
"0.6670062",
"0.6611915",
"0.6585054",
"0.6574342",
"0.6565525",
"0.65529203",
"0.64922637",
"0.64908326",
"0.6461315",
"0.6452661",
"0.64328843",
"0.64304227",
"0.64273787",
"0.6418945",
"0.63917834"
]
| 0.74890983 | 1 |
Tests POST method on google authenticator | def test_create_ga(self):
url = reverse('admin_google_authenticator')
data = {
}
self.client.force_authenticate(user=self.admin)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_0050_registration_post_1(self):\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop', 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'wrong'}\n )\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(\n u'There were error(s) in processing your registration.'\n ), 1\n )",
"def test_api_user_submit_token_post(self):\n pass",
"def test_do_post__OK(self):\n params = {'token': 'checked in base class'}\n with test_app.test_request_context(self.request_path, json=params):\n session.clear()\n testing_config.sign_in('[email protected]', 111)\n actual = self.handler.do_post()\n\n self.assertIn('signed_user_info', session)\n self.assertIn('token', actual)\n self.assertIn('token_expires_sec', actual)",
"def test_response_has_auth_token(self, request_post):\n gae_req = AppEngineRequest(url=\"/foo\", appid=\"test\", source=\"test\",\n email=\"[email protected]\", password=\"foobar\")\n\n request_post.return_value = Mock(text=\"Auth=my_token\\n\")\n\n token = gae_req.get_auth_token()\n\n self.assertEqual(token, \"my_token\")\n self.assertIsNone(gae_req.sid)\n\n data = {\n \"Email\": \"[email protected]\",\n \"Passwd\": \"foobar\",\n \"service\": \"ah\",\n \"source\": \"test\",\n \"accountType\": \"HOSTED_OR_GOOGLE\"\n }\n\n request_post.assert_called_once_with(\n \"https://www.google.com/accounts/ClientLogin\", data=data)",
"def test_post_method(self):\n self.getPage('/', method='POST')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')",
"def test_registration(self):\n response = self.client_app.post(\n '/api/v1/auth/signup/',\n data=json.dumps(dict(\n last_name='james',\n email='[email protected]',\n password='123456sddfdf'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['email'] == '[email protected]')\n self.assertTrue(data['first_name'] is None)\n self.assertTrue(data['last_name'] == 'james')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)",
"def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)",
"def test_authflow(self):\n response = self.client.post('/auth/signup/', {\n 'first_name': 'John',\n 'last_name': 'Doe',\n 'email': '[email protected]',\n 'password': self.password,\n 'gstin': '11AAAAA1111A1A1',\n 'mobile': self.mobile,\n 'business_name': 'busi_ness',\n 'address': {'address_name':'', 'address_line1': '', 'address_line2': '', 'state': '', 'pincode': '209801', 'country': 'INDIA'}\n })\n\n response_data = response.json()\n\n self.assertListEqual(list(response_data.keys()), ['id', 'otp'])\n\n response = self.client.post('/auth/verify-otp/', response_data)\n\n response_data = response.json()\n self.assertListEqual(list(response_data.keys()), ['token', 'refresh_token', 'session_key'])\n self.assertRegexpMatches(response_data['token'], r'[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+')\n self.assertRegexpMatches(response_data['refresh_token'], r'[0-9A-Za-z]{32}')\n self.assertRegexpMatches(response_data['session_key'], r'[0-9A-Za-z]{32}')\n\n response = self.client.post('/auth/signin/', {'id_field': self.mobile, 'password': self.password})\n auth_data = response.json()\n\n refresh_token = auth_data['refresh_token']\n session_key = auth_data['session_key']\n\n response = self.client.post('/auth/refresh/', {'refresh_token': refresh_token}, HTTP_AUTHORIZATION='JWT ' + auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n refreshed_auth_data = response.json() \n response = self.client.get('/auth/handle-sessions/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n active_sessions = response.json()\n self.assertListEqual(list(active_sessions.keys()), ['token_list'])\n\n acitve_sessions_token_list = active_sessions.get('token_list')\n\n # end all other sessions except your own\n for session_key_iter in acitve_sessions_token_list:\n if session_key_iter != session_key:\n self.client.post('/auth/handle-sessions/', {'session_key': session_key_iter}, HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n # log out from own session\n self.client.get('/auth/signout/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)",
"def test_login(self):\n self.client_app.post(\n '/api/v1/auth/signup/',\n data=json.dumps(dict(\n last_name='james',\n email='[email protected]',\n password='123456sddfdf'\n )),\n content_type='application/json'\n )\n response = self.client_app.post(\n '/api/v1/auth/login/',\n data=json.dumps(dict(\n email='[email protected]',\n password='123456sddfdf',\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['email'] == '[email protected]')\n self.assertTrue(data['auth_token'])\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 200)",
"def test_POST(self):\n if not self.url:\n return\n response = self.client.post(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_api_user_post(self):\n pass",
"def test_response_has_auth_token_and_sid(self, request_post):\n gae_req = AppEngineRequest(url=\"/foo\", appid=\"test\", source=\"test\",\n email=\"[email protected]\", password=\"foobar\")\n\n request_post.return_value = Mock(text=\"Auth=my_token\\nSID=my_sid\")\n\n token = gae_req.get_auth_token()\n\n self.assertEqual(token, \"my_token\")\n self.assertEqual(gae_req.sid, \"my_sid\")\n\n data = {\n \"Email\": \"[email protected]\",\n \"Passwd\": \"foobar\",\n \"service\": \"ah\",\n \"source\": \"test\",\n \"accountType\": \"HOSTED_OR_GOOGLE\"\n }\n\n request_post.assert_called_once_with(\n \"https://www.google.com/accounts/ClientLogin\", data=data)",
"def test_post(self):\n pass",
"def post(self):\n\n data = request.get_json()\n # data = request.data\n print(\"data: \", data)\n\n arg_parser = reqparse.RequestParser()\n arg_parser.add_argument(\n \"exp\",\n default=15552000,\n help=\"Parameter must be an integer\",\n type=int\n )\n\n args = arg_parser.parse_args()\n\n print(args)\n\n auth = request.authorization\n print(\"auth req: \", auth)\n if not auth:\n # Try extracting from POST body\n print(\"here\")\n auth = request.get_json()\n print(\"here\")\n print(\"auth: \", auth)\n if not auth or not (\"email\" in auth and \"password\" in auth):\n abort(401, \"Missing authentication credentials\")\n\n # if auth[\"is_driver\"]:\n # # if it is a driver\n # user = Driver.identify(auth[\"email\"])\n # password = auth[\"password\"]\n\n # else:\n # # If it is a restaurant\n # user = Restaurant.identify(auth[\"email\"])\n # password = auth[\"password\"]\n\n is_driver = True\n\n user = Driver.identify(auth[\"email\"])\n password = auth[\"password\"]\n\n if not user:\n user = Restaurant.identify(auth[\"email\"])\n is_driver = False\n\n if not user or not user.verify_password(password):\n current_app.logger.warn(\n \"Incorrect credentials for {} from {}\".format(\n auth[\"email\"],\n *request.access_route\n )\n )\n abort(401, \"Incorrect email or password\")\n\n access_token = user.gen_access_token(args[\"exp\"])\n\n current_app.logger.info(\"[AUTH] User {} logged IN from {}\".format(\n user.email,\n *request.access_route\n ))\n\n access_token.update({\n \"is_driver\": is_driver\n })\n\n # return resp, 200\n return access_token",
"def test_registration(self):\n response = self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n username='joe',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully registered.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)",
"def test_doPost(self) -> None:\n status_code = apicall.doPost(URL, self._browserheader)\n assert status_code != API_SUCCESS",
"def test_user_login(app):\n login_res = app.post('/auth', data={\n 'email': '[email protected]',\n 'password': 'password'\n },content_type='application/json') \n assert login_res.status_code == 200",
"def test_registration_successful(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"kake\",\n \"email\": '[email protected]',\n \"password\": \"123445abcdefghijk\",\n }\n },\n format=\"json\"\n )\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_sign_up_view_with_post_request(self):\n form_data = {\n \"email\": \"[email protected]\",\n \"next_url\": \"home\"\n }\n response = self.client.post('/signup/', form_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.request['PATH_INFO'], '/home/')",
"def test_empty_request_returns_empty(self, request_post):\n gae_req = AppEngineRequest(url=\"/foo\", appid=\"test\")\n\n request_post.return_value = Mock(text=\"\")\n\n self.assertRaises(UnableToAuthenticate, gae_req.get_auth_token)\n\n data = {\n \"Email\": \"\",\n \"Passwd\": \"\",\n \"service\": \"ah\",\n \"source\": \"\",\n \"accountType\": \"HOSTED_OR_GOOGLE\"\n }\n\n request_post.assert_called_once_with(\n \"https://www.google.com/accounts/ClientLogin\", data=data)",
"def test_0020_login_post_1(self):\n response = self.fetch(\n '/login', method=\"POST\", follow_redirects=False,\n body=urlencode({\n 'email': '[email protected]', 'password': 'password'\n })\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(u'The email or password is invalid'), 1\n )",
"def test_read_ga(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_signup(self):\n res = self.client.get(\"/registration\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Create Account\" in data",
"def test_validate_post(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def test_registration_page(self):\n res = self.app.post('/register')\n self.assertEqual(res.status_code, 400)\n res = self.app.get('/register')\n self.assertEqual(res.status_code, 200)",
"def test_access_token_post(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"POST\")\n\n self.assertEqual(int(resp['status']), 200)\n\n res = dict(parse_qsl(content))\n self.assertTrue(b'oauth_token' in res)\n self.assertTrue(b'oauth_token_secret' in res)",
"def test_post(self):\n self.assertEqual(\n status.HTTP_405_METHOD_NOT_ALLOWED, self.response.status_code)",
"def test_profile_view_with_post_request(self):\n data = {\n \"first_name\": \"Test2\",\n \"last_name\": \"User2\"\n }\n response = self.client.post('/profile/', data)\n self.assertEqual(response.status_code, 302)",
"def test_post(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_post(self):\n self.assertEqual(200, self.resp.status_code)"
]
| [
"0.71235955",
"0.69123614",
"0.68825096",
"0.6866466",
"0.68049127",
"0.67935437",
"0.6757084",
"0.667855",
"0.6642896",
"0.66332465",
"0.6562463",
"0.6533614",
"0.65188736",
"0.64831114",
"0.6460359",
"0.6424149",
"0.64141566",
"0.6398847",
"0.6398251",
"0.6341648",
"0.6339199",
"0.63198596",
"0.63148457",
"0.6307057",
"0.63058233",
"0.6287878",
"0.6278325",
"0.6266432",
"0.62518156",
"0.62518156"
]
| 0.71679026 | 0 |
Tests DELETE method on ga without being an admin | def test_delete_ga_failure_no_admin(self):
url = reverse('admin_google_authenticator')
data = {
'google_authenticator_id': self.ga.id
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_ga_failure_no_ga_id(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_delete_ga_success(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(models.Google_Authenticator.objects.all().count(), 0)",
"def test_delete_ga_failure_ga_id_not_exist(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': '499d3c84-e8ae-4a6b-a4c2-43c79beb069a'\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )",
"def test_delete(self):\n pass",
"def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)",
"def test_DELETE(self):\n if not self.url:\n return\n response = self.client.delete(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))",
"def test_delete(self):\n user = self.custodian_1_user\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n data = None\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client, self.admin_client,\n self.custodian_2_client],\n \"allowed\": []\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.delete(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.delete(url, data, format='json').status_code,\n status.HTTP_200_OK\n )",
"def test_delete_o_auth_client_authorization(self):\n pass",
"def test_delete_o_auth_client(self):\n pass",
"def test_client_nationlity_delete(self):\n pass",
"def test_delete_campaign_by_admin_passes(self):\n response = self.client.delete(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n headers={\"Authorization\": self.admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body[\"Success\"], \"Campaign deleted\")",
"def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_admin_from_org(self):\n pass",
"def test_duo_account_delete(self):\n pass",
"def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)",
"def test_dashboards_v2_delete(self):\n pass",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"def test_delete_goal(self):\n pass",
"def test_delete_method(self):\n self.getPage('/blah', method='PUT')\n self.getPage('/', method='DELETE')\n self.assertStatus('204 No Content')\n self.assertHeader('Content-Type', 'application/json')",
"def test_client_verification_document_delete(self):\n pass",
"def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(self.feature_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertTrue(revised_feature.deleted)",
"def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(self.feature_id)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)",
"def test_client_can_do_delete_request(self):\n response = self.httpbin_4.test_requests_delete_method()\n self.assertEqual(response.request.method, 'DELETE')\n self.assertEqual(response.status_code, 200)",
"def test_delete(self):\n\n url = reverse('file')\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200",
"def test_delete_author_logged(self):\n self.client.force_authenticate(user=self.user)\n\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_object(self):\n u = self.d.user('example')\n u.delete()\n\n method, url, data, headers = self.d._fetcher.last_request\n self.assertEqual(method, 'DELETE')\n self.assertEqual(url, '/users/example')",
"def test_post_delete_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.user)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)"
]
| [
"0.78405344",
"0.7798382",
"0.7431772",
"0.73222965",
"0.73197466",
"0.7210531",
"0.71509945",
"0.71504116",
"0.71398807",
"0.71240693",
"0.71109575",
"0.7097844",
"0.7097153",
"0.70948076",
"0.705479",
"0.70487773",
"0.7041551",
"0.70338",
"0.700916",
"0.6971044",
"0.6954457",
"0.69310427",
"0.69267076",
"0.6906557",
"0.68998605",
"0.6891654",
"0.68904096",
"0.68866456",
"0.6879388",
"0.6872969"
]
| 0.80400723 | 0 |
Tests DELETE method on ga without a ga id | def test_delete_ga_failure_no_ga_id(self):
url = reverse('admin_google_authenticator')
data = {
}
self.client.force_authenticate(user=self.admin)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_ga_failure_ga_id_not_exist(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': '499d3c84-e8ae-4a6b-a4c2-43c79beb069a'\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_delete_ga_success(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(models.Google_Authenticator.objects.all().count(), 0)",
"def test_delete_ga_failure_no_admin(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def delete(self, _id):",
"def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200",
"def test_delete(self):\n pass",
"def test_deleting_goal(self):\n\n delete_goal(1)\n self.assertIsNone(Goal.query.get(1))",
"def test_delete_goal(self):\n pass",
"def test_delete_alert_by_id(self):\n pass",
"def delete(self):\n return self.request('', pylastica.request.Request.DELETE)",
"def test_data_source_soaps_id_delete(self):\n pass",
"def test_client_verification_document_delete(self):\n pass",
"def test_deleting_patient_goals(self):\n\n data = {\"goal\": 1}\n result = self.client.post(\"/delete-goal\", data=data)\n goal = Goal.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(goal)",
"def test_delete_o_auth_client(self):\n pass",
"def test_delete_method(self):\n self.getPage('/blah', method='PUT')\n self.getPage('/', method='DELETE')\n self.assertStatus('204 No Content')\n self.assertHeader('Content-Type', 'application/json')",
"def simulate_delete(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'DELETE', path, **kwargs)",
"def test_request_do_delete_non_existent_id(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert DUT.request_do_delete(100)",
"def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None",
"def test_DELETE(self):\n if not self.url:\n return\n response = self.client.delete(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)",
"def test_delete_device_by_id(self):\n pass",
"def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')",
"def test_delete_car_invalid_id():\n response = client.delete(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND",
"def delete(cls, id):\n raise Exception('Not Implemented Yet')",
"def delete(thing, id_):\n pass",
"def test_user_id_delete(self):\n pass",
"def test_event_deleted(dummy_regform, api_delete, api_post):\n dummy_regform.event.delete('Unit tests')\n assert api_delete.call_count == 1\n assert api_post.call_count == 0",
"def test_event_deleted(dummy_regform, api_delete, api_post):\n dummy_regform.event.delete('Unit tests')\n assert api_delete.call_count == 1\n assert api_post.call_count == 0",
"def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND",
"def test_delete(self):\n activity = Activity.objects.first()\n url, parsed = self.prepare_urls('v1:activity-detail', subdomain=self.company.subdomain, kwargs={'pk': activity.id})\n \n response = self.client.delete(url, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.delete(url, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.activities_count-1, content['count']) # deleted 1 activity"
]
| [
"0.75935537",
"0.73303866",
"0.72282845",
"0.7068529",
"0.6912258",
"0.67213583",
"0.6707342",
"0.6631806",
"0.66277874",
"0.6619722",
"0.66181743",
"0.65358365",
"0.6516876",
"0.6516742",
"0.6514221",
"0.6489775",
"0.6468499",
"0.64639056",
"0.6462968",
"0.64613545",
"0.6457326",
"0.64476407",
"0.6442231",
"0.6442068",
"0.6426515",
"0.64251095",
"0.64217067",
"0.64217067",
"0.6418464",
"0.641407"
]
| 0.7927357 | 0 |
Tests DELETE method on ga with a ga id that does not exist | def test_delete_ga_failure_ga_id_not_exist(self):
url = reverse('admin_google_authenticator')
data = {
'google_authenticator_id': '499d3c84-e8ae-4a6b-a4c2-43c79beb069a'
}
self.client.force_authenticate(user=self.admin)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_ga_failure_no_ga_id(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_delete_ga_success(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(models.Google_Authenticator.objects.all().count(), 0)",
"def test_delete_ga_failure_no_admin(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def delete(self, _id):",
"def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')",
"def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND",
"def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200",
"def test_delete_car_invalid_id():\n response = client.delete(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND",
"def test_deleting_goal(self):\n\n delete_goal(1)\n self.assertIsNone(Goal.query.get(1))",
"def test_delete_not_found(self):\n resp = self.client.delete(\n \"/tracking?repo=not_found1&branch=not_found1\", content_type=\"application/json\", headers=self.auth\n )\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.DELETE_DB_NOT_FOUND, resp_dict.get(\"code\"), msg=\"Error in status code return\")",
"def test_request_do_delete_non_existent_id(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert DUT.request_do_delete(100)",
"def test_data_source_soaps_id_delete(self):\n pass",
"def test_delete__not_found(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.NotFound):\n self.handler.do_delete(self.feature_id + 1)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)",
"def test_delete_alert_by_id(self):\n pass",
"def test_delete_non_notification_id(self):\n\n url = reverse('notification', kwargs={'way_id': self.notification.way_id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 400)",
"def test_deleting_patient_goals(self):\n\n data = {\"goal\": 1}\n result = self.client.post(\"/delete-goal\", data=data)\n goal = Goal.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(goal)",
"def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)",
"def test_delete_user_by_id_non_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None",
"def test_delete_user_by_id_non_existent(client: FlaskClient) -> None:\n username = create_random_username()\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.NOT_FOUND)",
"def test_that_when_dataset_is_deleted_the_account_is_still_there(self):\n test_dataset = Dataset.objects.get(\n dataset_slug=\"google-geojson-example\")\n test_dataset.delete()\n with self.assertRaises(ObjectDoesNotExist):\n Dataset.objects.get(dataset_slug=\"google-geojson-example\")\n Account.objects.get(account_slug=\"test_user\")",
"def test_AlgorithmsIdHandler_DELETE_NotFound(self):\n searched_id = 'xyz1'\n right_list = []\n create_test_algorithm_list(right_list, 101)\n documents = []\n create_test_documents_list(right_list, documents, 101)\n index = search.Index(name=search_algorithm._INDEX_STRING)\n index.put(documents)\n # end of preparing data\n self.assertIsNone(index.get(searched_id), msg='Algorithm is there but should not be')\n response = self.testapp.delete('/algorithms/' + searched_id)\n self.assertEqual(200, response.status_int, msg='Wrong return code')\n self.assertIsNone(index.get(searched_id), msg='Algorithm is still there')",
"def test_delete(self):\n pass",
"def test_delete_non_existent_campaign_fails(self):\n response = self.client.delete(\n f\"{self.endpoint_url}99/\", headers={\"Authorization\": self.admin_token}\n )\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)",
"def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None",
"def test_delete_device_by_id(self):\n pass",
"def delete(cls, id):\n raise Exception('Not Implemented Yet')",
"def test_delete_goal(self):\n pass",
"def test_delete_account(self):\n id = Account.objects.first().id\n url = reverse('account:accounts-detail', kwargs={\"id\":id})\n data = {}\n response = self.client.delete(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Account.objects.count(), 0)",
"def test_delete_another_way_id(self):\n\n url = reverse('notification',\n kwargs={'way_id': 101, 'notification_id': self.notification.id})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)",
"def test_do_delete_non_existent_id(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(300)\r\n\r\n assert _error_code == 2005\r\n assert _msg == (\"RAMSTK ERROR: Attempted to delete non-existent \"\r\n \"Function ID 300.\")"
]
| [
"0.7988406",
"0.71753716",
"0.7135444",
"0.6940979",
"0.67862177",
"0.67631596",
"0.67527014",
"0.67248595",
"0.6668205",
"0.65559757",
"0.655533",
"0.65383536",
"0.65380216",
"0.65101993",
"0.6438291",
"0.64287865",
"0.64131516",
"0.63993865",
"0.6388697",
"0.6383335",
"0.63812387",
"0.6381018",
"0.6378196",
"0.6374255",
"0.63734084",
"0.63569134",
"0.63245493",
"0.6323894",
"0.6281402",
"0.62798476"
]
| 0.7812305 | 1 |
Convert a quaternion into euler angles (roll, pitch, yaw) roll is rotation around x in radians (counterclockwise) pitch is rotation around y in radians (counterclockwise) yaw is rotation around z in radians (counterclockwise) | def euler_from_quaternion(x, y, z, w):
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians",
"def quaternion_to_euler(q):\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw",
"def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z",
"def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians",
"def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r",
"def convert_euler_to_quaternion(roll, yaw, pitch):\n\n # roll (z), yaw (y), pitch (x)\n\n cy = math.cos(math.radians(roll) * 0.5)\n sy = math.sin(math.radians(roll) * 0.5)\n\n cp = math.cos(math.radians(yaw) * 0.5)\n sp = math.sin(math.radians(yaw) * 0.5)\n\n cr = math.cos(math.radians(pitch) * 0.5)\n sr = math.sin(math.radians(pitch) * 0.5)\n\n w = cy * cp * cr + sy * sp * sr\n x = cy * cp * sr - sy * sp * cr\n y = sy * cp * sr + cy * sp * cr\n z = sy * cp * cr - cy * sp * sr\n\n quat = np.array([w, x, y, z])\n quat = quat / np.linalg.norm(quat)\n return quat",
"def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw",
"def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n orientation.y,\n orientation.z,\n orientation.w\n )\n euler = tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return (roll,pitch,yaw)",
"def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def euler_to_quat(roll, pitch, yaw):\n pose = Pose()\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose.orientation",
"def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles",
"def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw",
"def get_euler_angle_from_quat(w, x, y, z):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n alpha = math.atan2(t0, t1) * 180 / math.pi\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n beta = math.asin(t2) * 180 / math.pi\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n gamma = math.atan2(t3, t4) * 180 / math.pi\n return alpha, beta, gamma",
"def toEulerAngle(w, x, y, z):\n # roll (x-axis rotation)\n sinr = +2.0 * (w * x + y * z)\n cosr = +1.0 - 2.0 * (x * x + y * y)\n roll = math.atan2(sinr, cosr)\n # pitch (y-axis rotation)\n sinp = +2.0 * (w * y - z * x)\n if (math.fabs(sinp) >= 1):\n pitch = math.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\n else:\n pitch = math.asin(sinp)\n # yaw (z-axis rotation)\n siny = +2.0 * (w * z + x * y)\n cosy = +1.0 - 2.0 * (y * y + z * z)\n yaw = math.atan2(siny, cosy)\n return roll, pitch, yaw",
"def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):\n\tt0 = math.cos(math.radians(yaw * 0.5))\n\tt1 = math.sin(math.radians(yaw * 0.5))\n\tt2 = math.cos(math.radians(roll * 0.5))\n\tt3 = math.sin(math.radians(roll * 0.5))\n\tt4 = math.cos(math.radians(pitch * 0.5))\n\tt5 = math.sin(math.radians(pitch * 0.5))\n\n\tw = t0 * t2 * t4 + t1 * t3 * t5\n\tx = t0 * t3 * t4 - t1 * t2 * t5\n\ty = t0 * t2 * t5 + t1 * t3 * t4\n\tz = t1 * t2 * t4 - t0 * t3 * t5\n\n\treturn [w, x, y, z]",
"def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]",
"def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]",
"def to_quaternion(self, roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]",
"def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(quaternion):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(quaternion)))\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(\"Input must be a tensor of shape Nx4 or 4. Got {}\"\n .format(quaternion.shape))\n # unpack input and compute conversion\n q1: torch.Tensor = quaternion[..., 1]\n q2: torch.Tensor = quaternion[..., 2]\n q3: torch.Tensor = quaternion[..., 3]\n sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3\n\n sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)\n cos_theta: torch.Tensor = quaternion[..., 0]\n two_theta: torch.Tensor = 2.0 * torch.where(\n cos_theta < 0.0,\n torch.atan2(-sin_theta, -cos_theta),\n torch.atan2(sin_theta, cos_theta))\n\n k_pos: torch.Tensor = two_theta / sin_theta\n k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)\n k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)\n\n angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]\n angle_axis[..., 0] += q1 * k\n angle_axis[..., 1] += q2 * k\n angle_axis[..., 2] += q3 * k\n return angle_axis",
"def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])",
"def rpy_from_quaternion(quaternion):\n (yaw, pitch, roll) = quaternion.yaw_pitch_roll\n return (roll, pitch, yaw)",
"def quaternion_to_angle_axis(quaternion):\n ha = np.arccos(quaternion[0])\n theta = 2 * ha\n if theta < np.finfo(float).eps:\n theta = 0\n axis = np.array([1, 0, 0])\n else:\n axis = quaternion[[1, 2, 3]] / np.sin(ha)\n return theta, axis",
"def quat_to_yaw_deg(qx,qy,qz,qw):\n degree = pi/180\n sqy = qy*qy\n sqz = qz*qz\n siny = 2 * (qw*qz+qx*qy)\n cosy = 1 - 2*(qy*qy+qz*qz)\n yaw = int(atan2(siny,cosy)/degree)\n return yaw",
"def to_quaternion(self,roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]",
"def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)",
"def quaternion_to_RPY(q: array):\n\n roll: float = arctan2(2 * (q[0] * q[1] + q[2] * q[3]), 1 - (2 * (power(q[1], 2) + power(q[2], 2))))\n pitch: float = arcsin(2 * (q[0] * q[2] - q[3] * q[1]))\n yaw: float = arctan2(2 * (q[0] * q[3] + q[1] * q[2]), 1 - (2 * (power(q[2], 2) + power(q[3], 2))))\n\n return roll, pitch, yaw",
"def toQuat(roll_pitch_yaw):\n cos_r = np.cos(roll_pitch_yaw[0] * 0.5)\n sin_r = np.sin(roll_pitch_yaw[0] * 0.5)\n cos_p = np.cos(roll_pitch_yaw[1] * 0.5)\n sin_p = np.sin(roll_pitch_yaw[1] * 0.5)\n cos_y = np.cos(roll_pitch_yaw[2] * 0.5)\n sin_y = np.sin(roll_pitch_yaw[2] * 0.5)\n w = cos_y * cos_p * cos_r + sin_y * sin_p * sin_r\n x = cos_y * cos_p * sin_r - sin_y * sin_p * cos_r\n y = sin_y * cos_p * sin_r + cos_y * sin_p * cos_r\n z = sin_y * cos_p * cos_r - cos_y * sin_p * sin_r\n return np.array([w, x, y, z])"
]
| [
"0.8658502",
"0.8394331",
"0.8320644",
"0.82789195",
"0.82181174",
"0.81303775",
"0.8088952",
"0.80464375",
"0.7996021",
"0.7938749",
"0.7929835",
"0.7912955",
"0.78689206",
"0.7632398",
"0.74772036",
"0.7472176",
"0.7432922",
"0.7294737",
"0.7286224",
"0.7286224",
"0.72656506",
"0.72427815",
"0.7223147",
"0.7210825",
"0.7168258",
"0.7166645",
"0.7133707",
"0.71199185",
"0.71185607",
"0.7077495"
]
| 0.8440425 | 1 |
Create / get S3 bucket for tests | def s3_bucket(s3_resource, s3_client, account_id, boto_session):
region_name = boto_session.region_name
bucket_name = f"amazon-braket-sdk-integ-tests-{account_id}"
bucket = s3_resource.Bucket(bucket_name)
try:
# Determine if bucket exists
s3_client.head_bucket(Bucket=bucket_name)
except ClientError as e:
error_code = e.response["Error"]["Code"]
if error_code == "404":
bucket.create(
ACL="private", CreateBucketConfiguration={"LocationConstraint": region_name}
)
return bucket_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_bucket(self):\n pass",
"def mock_s3_bucket():\n with moto.mock_s3():\n bucket_name = \"mock-bucket\"\n my_config = Config(region_name=\"us-east-1\")\n s3_client = boto3.client(\"s3\", config=my_config)\n s3_client.create_bucket(Bucket=bucket_name)\n yield bucket_name",
"def s3_create_bucket(self):\n self.conn.create_bucket(DEFAULT_BUCKET_NAME)",
"def mock_s3_bucket():\n import boto3\n import moto\n\n with moto.mock_s3():\n bucket_name = \"mock-bucket\"\n s3_client = boto3.client(\"s3\")\n s3_client.create_bucket(Bucket=bucket_name)\n yield bucket_name",
"def test_creating_a_bucket(self):\n with self.client:\n self.create_bucket(self.get_user_token())",
"def test_create_bucket(self, boto_mock):\n conn = boto_mock.s3.connect_to_region()\n boto_mock.exception.S3ResponseError = boto.exception.S3ResponseError\n\n def raise_not_found(*_, **__):\n \"\"\" Raise a 'bucket not found' exception \"\"\"\n e = boto.exception.S3ResponseError(400, 'missing')\n e.error_code = 'NoSuchBucket'\n raise e\n conn.get_bucket = raise_not_found\n settings = {\n 'storage.bucket': 'new_bucket',\n 'storage.region': 'us-east-1',\n }\n S3Storage.configure(settings)\n conn.create_bucket.assert_called_with('new_bucket',\n location='us-east-1')",
"def setup_bucket_wo_contents(mock_s3_client, test_bucket):\n s3 = boto3.client('s3')\n s3.create_bucket(Bucket=test_bucket)\n\n yield",
"def create_sam_bucket():\n local(f\"aws s3 mb s3://{env.bucket_name} --region {env.aws_region}\")",
"def s3_bucket(s3_server): # pylint: disable=redefined-outer-name\n client = s3_server.get_s3_client()\n bucket_name = text_type(uuid.uuid4())\n client.create_bucket(Bucket=bucket_name)\n return BucketInfo(client, bucket_name)",
"def CreateS3Bucket(self):\n bucketFound = False\n region = \"eu-west-1\"\n try: # Check if bucket exists\n client.head_bucket(Bucket=self.bucketName)\n bucketFound = True\n s3Log.info (\"Bucket \\'{}\\' Exists! \".format(self.bucketName))\n except ClientError as e: # Bucket Does not exist\n if e.response[\"Error\"][\"Message\"] == \"Not Found\":\n s3Log.info(\"Bucket \\'{}\\' does not exist!\".format(self.bucketName))\n\n if bucketFound == 0: #since bucket does not exist, we ought to create it\n s3Log.info(\"Creating Bucket \\'{}\\' in region={}\".format(self.bucketName, region))\n try:\n bucket_response = client.create_bucket(Bucket=self.bucketName,\n CreateBucketConfiguration={\n 'LocationConstraint': region})\n bucketFound = True\n except ClientError as e:\n s3Log.error(\"FATAL ERROR: Unable to create bucket \\'{}\\' {}\".format(self.bucketName, e))\n sys.exit(1)\n\n\n return bucketFound",
"def test_get_buckets(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n self.assertEqual(s3_connector.get_buckets(), [\"foobucket\"])",
"def test_get_bucket(self):\n pass",
"def make_s3(sitename):\n return s3.S3(sitename)",
"def boto_init_s3(bucket_name):\n c = boto.connect_s3(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n b = c.get_bucket(bucket_name)\n\n return b",
"def create_bucket(s3, bucket_name):\n \n try:\n s3.create_bucket(Bucket=bucket_name)\n except ClientError as e:\n print(f'ERROR: {e}')\n return False\n return True",
"def test_init_auto_create_bucket(self):\n access_key = 'fake_key'\n secret_key = 'fake_secret'\n bucket_name = 'fake_bucket'\n location = 'anywhere'\n\n with patch('boto.connect_s3') as mock_connect_s3:\n mock_error = S3ResponseError(999, \"Failed\")\n mock_error.error_code = \"NoSuchBucket\"\n mock_conn = Mock()\n mock_conn.get_bucket.side_effect = mock_error\n\n mock_connect_s3.return_value = mock_conn\n\n S3Backend(access_key=access_key, secret_key=secret_key,\n bucket_name=bucket_name, s3_location=location)\n\n mock_connect_s3.assert_called_once_with(access_key, secret_key)\n mock_conn.get_bucket.assert_called_once_with(bucket_name)\n\n mock_conn.create_bucket.assert_called_once_with(bucket_name, location=location)",
"def create_bucket(bucket_name, KEY, SECRET, region=None):\n ## Creating the bucket \n try:\n if region is None:\n s3_client = boto3.client('s3', \n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n s3_client = boto3.client('s3', \n region_name=region,\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n location = {'LocationConstraint': region}\n s3_client.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration=location)\n except ClientError as e:\n logging.error(e)\n print('Could not create')\n exit()\n \n print('************************************')\n print('Create S3 Client')\n print('************************************')\n return s3_client",
"def create_bucket(bucket: BucketLocation) -> None:\n try:\n if bucket.region is None:\n s3_client = boto3.client(\"s3\")\n s3_client.create_bucket(Bucket=bucket.name)\n else:\n s3_client = boto3.client(\"s3\", region_name=bucket.region)\n location = {\"LocationConstraint\": bucket.region}\n s3_client.create_bucket(\n Bucket=bucket.name, CreateBucketConfiguration=location\n )\n except ClientError as e:\n log.error(f\"Failed to create bucket {bucket!r}. {e}\")\n raise",
"def bucket_exists(gs_client, test_bucket):\n bucket = gs_client.conn.bucket(test_bucket)\n if not bucket.exists():\n gs_client.conn.create_bucket(test_bucket, predefined_acl=\"project-private\")\n yield gs_client",
"def mock_xpro_learning_bucket(\n xpro_aws_settings, mock_s3_fixture\n): # pylint: disable=unused-argument\n s3 = boto3.resource(\n \"s3\",\n aws_access_key_id=xpro_aws_settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=xpro_aws_settings.AWS_SECRET_ACCESS_KEY,\n )\n bucket = s3.create_bucket(Bucket=xpro_aws_settings.XPRO_LEARNING_COURSE_BUCKET_NAME)\n yield SimpleNamespace(s3=s3, bucket=bucket)",
"def create_bucket(request: Dict) -> Dict:\n global config\n\n body = {\n \"user_name\": request.get(\"user_name\"),\n \"prefix\": request.get(\"bucket_name\")[0:5],\n \"bucket_name\": request.get(\"bucket_name\"),\n \"region\": request.get(\"region\")\n }\n\n response = requests.post(url=config.api_url('bucket'),\n data=json.dumps(body),\n headers={'content-type': 'application/json'})\n\n if response.status_code == HTTPStatus.OK:\n return response.json()",
"def test_bucket_availability(self):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(app.config['S3_PHOTO_BUCKET'])\n exists = True\n try:\n s3.meta.client.head_bucket(Bucket=app.config['S3_PHOTO_BUCKET'])\n self.assertEqual(exists, True)\n except botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = e.response['Error']['Code']\n if error_code == '404':\n exists = False\n self.assertEqual(exists, True, msg='Bucket is not exist!')",
"def setup_buckets():\n s3 = boto.connect_s3()\n s3.create_bucket('mls_data.mls.angerilli.ca')",
"def create_bucket(bucket_name):\r\n\r\n # initialize client & get bucket\r\n storage_client, bucket, _ = create_client(bucket_name)\r\n\r\n # set storage class, by default STANDARD\r\n bucket.storage_class = \"COLDLINE\"\r\n\r\n # create new bucket\r\n new_bucket = storage_client.create_bucket(bucket, location='us-central1')\r\n\r\n # print new bucket detail\r\n print(vars(bucket))\r\n\r\n return None",
"def get_new_bucket(session=boto3, name=None, headers=None):\n s3 = session.resource('s3', \n use_ssl=False,\n verify=False,\n endpoint_url=endpoint_url, \n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key)\n if name is None:\n name = get_new_bucket_name()\n bucket = s3.Bucket(name)\n bucket_location = bucket.create()\n return bucket",
"def create_bucket() -> None:\n try:\n client.make_bucket(DATASETS_BUCKET)\n except BucketAlreadyOwnedByYou:\n logger.debug(f\"Not creating bucket {DATASETS_BUCKET}: Bucket already exists\")\n pass\n else:\n logger.debug(f\"Successfully created bucket {DATASETS_BUCKET}\")",
"def test_bucket():\n return 'test_bucket'",
"def add(bucket_name, permissions=None, region_name=Location.EU):\n conn = connect_s3()\n conn.create_bucket(bucket_name, location=region_name)",
"def _create_s3_bucket_if_not_exist(self, prefix):\n account = self.boto_session.client(\"sts\").get_caller_identity()[\"Account\"]\n region = self.boto_session.region_name\n s3_bucket_name = \"{}-{}-{}\".format(prefix, region, account)\n\n s3 = self.boto_session.resource(\"s3\")\n s3_client = self.boto_session.client(\"s3\")\n try:\n # 'us-east-1' cannot be specified because it is the default region:\n # https://github.com/boto/boto3/issues/125\n if region == \"us-east-1\":\n s3.create_bucket(Bucket=s3_bucket_name)\n else:\n s3.create_bucket(\n Bucket=s3_bucket_name, CreateBucketConfiguration={\"LocationConstraint\": region}\n )\n logger.info(\n \"Successfully create S3 bucket '{}' for storing {} data\".format(\n s3_bucket_name, prefix\n )\n )\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n message = e.response[\"Error\"][\"Message\"]\n\n if error_code == \"BucketAlreadyOwnedByYou\":\n pass\n elif (\n error_code == \"OperationAborted\" and \"conflicting conditional operation\" in message\n ):\n # If this bucket is already being concurrently created, we don't need to create it again.\n pass\n elif error_code == \"TooManyBuckets\":\n # Succeed if the default bucket exists\n s3.meta.client.head_bucket(Bucket=s3_bucket_name)\n else:\n raise\n\n s3_waiter = s3_client.get_waiter(\"bucket_exists\")\n s3_waiter.wait(Bucket=s3_bucket_name)\n return s3_bucket_name",
"def fixture_make_bucket(request):\n def _make_bucket(resource, bucket_name, region_name=None):\n if not region_name:\n region_name = resource.meta.client.meta.region_name\n\n bucket = resource.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n }\n )\n\n def fin():\n bucket.objects.delete()\n bucket.delete()\n request.addfinalizer(fin)\n\n return bucket\n\n return _make_bucket"
]
| [
"0.7903016",
"0.7897161",
"0.78822505",
"0.78671503",
"0.76673317",
"0.76400506",
"0.76379246",
"0.746405",
"0.74143255",
"0.73914135",
"0.73775566",
"0.7376731",
"0.7319354",
"0.73147637",
"0.7291668",
"0.72262007",
"0.7194341",
"0.7152316",
"0.7140391",
"0.70995533",
"0.7085167",
"0.70632017",
"0.7044625",
"0.70445615",
"0.70362705",
"0.7022814",
"0.7021502",
"0.6997391",
"0.6996838",
"0.69806194"
]
| 0.7979803 | 0 |
Create a dictionary with file attributes for FUSE. | def create_file_attributes(permissions, time, size):
return {
'st_mode': (stat.S_IFREG | permissions),
'st_ctime': time,
'st_mtime': time,
'st_atime': time,
'st_size': size,
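        # Files are reported as owned by the user running the filesystem process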
'st_uid': os.getuid(),
'st_gid': os.getgid(),
'st_nlink': 1
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_file_hash_dict(cls, file, file_path):\n\n file_info = {}\n file_info['path'] = file_path\n file_info['hash'] = cls.get_256_hash_from_file(file_path)\n file_info['type'] = 'file'\n file_info['name'] = file\n file_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return file_info",
"def _getstatdict_forcreate(self, mode):\n now = getnow()\n stat = {\n 'st_mode': mode,\n 'st_nlink': 1,\n 'st_uid': os.getuid(),\n 'st_gid': os.getgid(),\n 'st_size': 0,\n 'st_atime': now,\n 'st_mtime': now,\n 'st_ctime': now,\n }\n return stat",
"def info(self, fp):\n keys = (\n (\"cas.meta.compression\", CAS._convert_meta),\n (\"cas.meta.lib\", CAS._convert_meta),\n (\"cas.meta.fp_algo\", CAS._convert_meta),\n (\"cas.meta.orig_size\", CAS._convert_meta),\n (\"cas.refcount\", CAS._convert_refcount),\n )\n\n return {key: conv(self.ioctx.get_xattr(fp, key))\n for key, conv in keys}",
"def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict",
"def extra_state_attributes(self):\n return {\"file_path\": self._file_path}",
"def create_directory_attributes(time):\n\n return {\n 'st_mode': (stat.S_IFDIR | 0o555),\n 'st_ctime': time,\n 'st_mtime': time,\n 'st_atime': time,\n 'st_uid': os.getuid(),\n 'st_gid': os.getgid(),\n 'st_nlink': 2\n }",
"def stat_file(self, path, info):\n return {}",
"def _GetStatAttribute(self):\n stat_attribute = attribute.StatAttribute()\n stat_attribute.device_number = self._fshfs_file_entry.device_number\n stat_attribute.group_identifier = self._fshfs_file_entry.group_identifier\n stat_attribute.inode_number = self._fshfs_file_entry.identifier\n stat_attribute.mode = self._fshfs_file_entry.file_mode\n stat_attribute.number_of_links = self._fshfs_file_entry.number_of_links\n stat_attribute.owner_identifier = self._fshfs_file_entry.owner_identifier\n stat_attribute.size = self._fshfs_file_entry.size\n stat_attribute.type = self.entry_type\n\n return stat_attribute",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def test_filesystem_can_get_attributes_of_file(self):\n time.time = MagicMock(return_value=time.time())\n self.index.photos_directory_exists = MagicMock(return_value=False)\n self.index.photos_file_exists = MagicMock(\n return_value=123000 # returns filesize\n )\n\n expected = {\n 'st_atime': time.time(),\n 'st_ctime': time.time(),\n 'st_gid': os.getgid(),\n 'st_mode': File('').ST_MODE,\n 'st_mtime': time.time(),\n 'st_size': 123000,\n 'st_uid': os.getuid(),\n }\n\n attr = self.filesystem._attributes(\n '/example.com/2019-01-13H20:00/index.png'\n )\n self.assertEqual(expected, attr)\n self.index.photos_file_exists.assert_called_with(\n domain='example.com',\n captured_at='2019-01-13H20:00',\n full_filename='/index.png',\n refresh_rate=self.refresh_rate\n )",
"def set_file_attr(self):\n if self.resolution == 1000:\n satellite_type = ['AQUA', 'TERRA']\n if self.satellite in satellite_type:\n try:\n h4r = SD(self.in_file, SDC.READ)\n self.file_attr = attrs2dict(h4r.attributes())\n except Exception as e:\n print(str(e))\n else:\n raise ValueError(\n 'Cant read this satellite`s data.: {}'.format(self.satellite))\n else:\n raise ValueError(\n \"Cant handle this resolution: \".format(self.resolution))",
"def _file_dict(self, fn_):\n if not os.path.isfile(fn_):\n err = \"The referenced file, {} is not available.\".format(fn_)\n sys.stderr.write(err + \"\\n\")\n sys.exit(42)\n with salt.utils.files.fopen(fn_, \"r\") as fp_:\n data = fp_.read()\n return {fn_: data}",
"def get_attributes(cls):\r\n return [Attribute('file'),\r\n Attribute('missing', None)]",
"def gen_meta(self, filename):\n nf_meta = {}\n nf_meta['filename'] = filename\n nf_meta['deleted'] = 0\n\n # http://stackoverflow.com/a/5297483\n nf_meta['key'] = hashlib.md5(str(filename).encode('utf-8')).hexdigest()\n self.log.debug(\"Note File Meta Key: %s\", nf_meta['key'])\n\n path = self.config.get_config('cfg_nt_path')\n\n # WARNING THIS IS PLATFORM SPECIFIC\n nf_meta['createdate'] = os.stat(path + \"/\" + filename).st_birthtime\n self.log.debug(\"Note File Meta Created: %s [%s]\", nf_meta['createdate'], time.ctime(nf_meta['createdate']))\n\n nf_meta['modifydate'] = os.stat(path + \"/\" + filename).st_mtime\n self.log.debug(\"Note File Meta Modified: %s [%s]\", nf_meta['modifydate'], time.ctime(nf_meta['modifydate']))\n\n return nf_meta",
"def to_dict(\n self,\n attributes: Iterable[str] = (\"xyz\", \"viewdir\", \"imgsz\", \"f\", \"c\", \"k\", \"p\"),\n ) -> Dict[str, tuple]:\n return {key: helpers.numpy_to_native(getattr(self, key)) for key in attributes}",
"def statfs(self, path, *args, **pargs):\n path, meta = self.find_cue_path(path)\n path = self.clean_path(path)\n stv = os.statvfs(path)\n return dict((key, getattr(stv, key)) for key in (\n 'f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',\n 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax'))",
"def create_linkfile_dict(child):\n\n attrs = ['src', 'dest']\n values = [child.get(attr) for attr in attrs]\n\n if None in values:\n raise InvalidManifest(\n 'Missing required attribute in linkfile element'\n )\n\n return dict(zip(attrs, values))",
"def _GetStatAttribute(self):\n stat_attribute = attribute.StatAttribute()\n stat_attribute.inode_number = getattr(\n self._fsntfs_file_entry, 'file_reference', None)\n stat_attribute.number_of_links = getattr(\n self._fsntfs_file_entry, 'number_of_links', None)\n stat_attribute.size = getattr(self._fsntfs_file_entry, 'size', None)\n stat_attribute.type = self.entry_type\n\n return stat_attribute",
"def map_file_data(file_obj, file_events):\n file_as_dict = {\n \"premis:originalName\": file_obj.currentlocation,\n \"original_name\": escape(file_obj.originallocation),\n # needs investigation\n \"sanitized_file_name\": get_sanitized_file_name(\n get_file_name_cleanup(file_events)\n ),\n \"prov:generatedAtTime\": file_obj.modificationtime.strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n ),\n \"premis:fixity\": {\n \"checksum_type\": convert_to_premis_hash_function(file_obj.checksumtype),\n \"Checksum\": file_obj.checksum,\n },\n \"premis:identifier\": file_obj.uuid,\n \"premis:size\": file_obj.size,\n \"file_name\": file_obj.label,\n # not sure if this is the file name or if we should stick with\n \"dct:FileFormat\": map_file_format_info(\n get_file_format_event(file_events), get_file_validation_event(file_events)\n ),\n \"file_validation\": map_file_validation_info(\n get_file_validation_event(file_events)\n ),\n \"file_normalization\": map_file_normalization_info(\n get_file_normalization_event(file_events)\n ),\n \"events\": list_file_events(file_events),\n }\n return file_as_dict",
"def device_state_attributes(self):\n attr = {\n 'folder': self._folder_path,\n 'filter': self._filter_term,\n 'recursive': self._recursive,\n 'number_of_files': self._number_of_files,\n 'bytes': self._size,\n 'last_added': self._last_added,\n 'last_deleted': self._last_deleted,\n 'last_modified': self._last_modified\n }\n return attr",
"def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic",
"def filenames(self) -> dict[str, str]:\r\n ...",
"def testGetAttributes(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertIsNone(file_entry._attributes)\n\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n\n test_attribute_value_data = test_attribute.read()\n self.assertEqual(test_attribute_value_data, b'My extended attribute')",
"def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None",
"def get_attrib_dict(self, attribs: Tuple[str]) -> Dict[str, str]:\n attrs = self.get_attribs(attribs)\n attrs = tuple(map(lambda a: (a[0][1:], a[1]), attrs))\n return dict(attrs)",
"def project_files_attributes(self):\n _files = {}\n for k, v in self.attributes.workspace.items():\n if isinstance(v, str) and v.startswith('gs://'):\n _files[k] = v\n return _files",
"def extra_state_attributes(self):\n return {ATTR_ATTRIBUTION: ATTRIBUTION}",
"def attributes_metadata(self):\n\n attribute_meta = collections.defaultdict(dict)\n\n for attribute in self.attributes:\n attribute_meta[attribute.name]['valuemap'] = attribute.valuemap\n attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers\n\n return dict(attribute_meta)",
"def read_attributes(filename):\n attributes = {}\n with open(filename) as f:\n for line in f:\n # Split line into student, college, year, major\n fields = line.split()\n student = int(fields[0])\n college = int(fields[1])\n year = int(fields[2])\n major = int(fields[3])\n \n # Store student in the dictionary\n attributes[student] = {'college': college,\n 'year': year,\n 'major': major}\n return attributes",
"def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a"
]
| [
"0.6876256",
"0.6477355",
"0.642789",
"0.63974553",
"0.6374335",
"0.636785",
"0.63562584",
"0.6264039",
"0.62063867",
"0.61930203",
"0.6149867",
"0.61277133",
"0.60900444",
"0.6011493",
"0.59660053",
"0.59634185",
"0.5940534",
"0.5900107",
"0.58942765",
"0.5891259",
"0.58804476",
"0.5872407",
"0.58221334",
"0.5813198",
"0.57971615",
"0.5791445",
"0.5785023",
"0.578079",
"0.57669336",
"0.57441103"
]
| 0.7342417 | 0 |
Create a dictionary with directory attributes for FUSE. | def create_directory_attributes(time):
return {
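        # S_IFDIR with 0o555 exposes every directory as read-only (r-xr-xr-x)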
'st_mode': (stat.S_IFDIR | 0o555),
'st_ctime': time,
'st_mtime': time,
'st_atime': time,
'st_uid': os.getuid(),
'st_gid': os.getgid(),
'st_nlink': 2
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_filesystem_can_get_attributes_of_directory(self):\n time.time = MagicMock(return_value=time.time())\n self.index.photos_directory_exists = MagicMock(return_value=True)\n self.index.photos_unique_domains = MagicMock(\n return_value=['example.com']\n )\n self.index.photos_unique_captures_of_domain = MagicMock(\n return_value=['2019-01-13H20:00']\n )\n\n expected = {\n 'st_atime': time.time(),\n 'st_ctime': time.time(),\n 'st_gid': os.getgid(),\n 'st_mode': Directory('').ST_MODE,\n 'st_mtime': time.time(),\n 'st_size': 0,\n 'st_uid': os.getuid(),\n }\n\n attr = self.filesystem._attributes('/')\n self.assertEqual(expected, attr)\n\n attr = self.filesystem._attributes('/example.com/')\n self.assertEqual(expected, attr)\n\n attr = self.filesystem._attributes('/example.com/2019-01-13H20:00')\n self.assertEqual(expected, attr)\n\n attr = self.filesystem._attributes('/example.com/2019-01-13H20:00/')\n self.assertEqual(expected, attr)\n\n attr = self.filesystem._attributes(\n '/example.com/2019-01-13H20:00/foo/bar'\n )\n self.assertEqual(expected, attr)\n self.index.photos_directory_exists.assert_called_with(\n domain='example.com',\n captured_at='2019-01-13H20:00',\n directory='/foo/bar/',\n refresh_rate=self.refresh_rate\n )",
"def create_file_attributes(permissions, time, size):\n\n return {\n 'st_mode': (stat.S_IFREG | permissions),\n 'st_ctime': time,\n 'st_mtime': time,\n 'st_atime': time,\n 'st_size': size,\n 'st_uid': os.getuid(),\n 'st_gid': os.getgid(),\n 'st_nlink': 1\n }",
"def create_directory(self, directory: str) -> Dict:\n raise NotImplementedError",
"def _getstatdict_forcreate(self, mode):\n now = getnow()\n stat = {\n 'st_mode': mode,\n 'st_nlink': 1,\n 'st_uid': os.getuid(),\n 'st_gid': os.getgid(),\n 'st_size': 0,\n 'st_atime': now,\n 'st_mtime': now,\n 'st_ctime': now,\n }\n return stat",
"def device_state_attributes(self):\n attr = {\n 'folder': self._folder_path,\n 'filter': self._filter_term,\n 'recursive': self._recursive,\n 'number_of_files': self._number_of_files,\n 'bytes': self._size,\n 'last_added': self._last_added,\n 'last_deleted': self._last_deleted,\n 'last_modified': self._last_modified\n }\n return attr",
"def __init__(self, filepath):\n self.pathdict = {}\n self.filepath = filepath\n for dirname, dirs, files in os.walk(self.filepath):\n for f in files:\n self.pathdict[os.path.join(os.path.relpath(dirname, self.filepath), f)] = self.get_permissions(os.path.join(dirname, f))\n for d in dirs:\n self.pathdict[os.path.join(os.path.relpath(dirname, self.filepath), d)] = self.get_permissions(os.path.join(dirname, d))",
"def create_stat_dic(stat_id, data_directories):\n station_dic = {}\n total_dim = []\n for d,i in data_directories.items():\n files = os.listdir(i)\n for f in files:\n Id = f.split('_'+d)[0]\n if Id == stat_id:\n if d not in station_dic.keys():\n station_dic[d] = [] \n station_dic[d].append(i + '/' + f)\n \n total_dim. append( os.path.getsize (i + '/' + f) )\n \n #print('FOUND!' , d , ' ' , f )\n \n size = sum(total_dim) \n return station_dic, size",
"def _create_directory_entries(self, key, config):\n # Initialize key variables\n updated = False\n dir_dict = {\n 'log_directory': 'log',\n 'ingest_cache_directory': 'cache',\n }\n directory = general.root_directory()\n\n # Setup the key value to a known good default\n if key in config['main']:\n # Verify whether key value is empty\n if config['main'][key] is not None:\n # Create\n if os.path.isdir(config['main'][key]) is False:\n config['main'][key] = ('%s/%s') % (\n directory, dir_dict[key])\n updated = True\n else:\n config['main'][key] = ('%s/%s') % (directory, dir_dict[key])\n updated = True\n else:\n config['main'][key] = ('%s/%s') % (directory, dir_dict[key])\n updated = True\n\n # Return\n return (updated, config)",
"def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict",
"def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict",
"def get_directory(self, directory: str) -> List[Dict]:\n raise NotImplementedError",
"def to_dict(\n self,\n attributes: Iterable[str] = (\"xyz\", \"viewdir\", \"imgsz\", \"f\", \"c\", \"k\", \"p\"),\n ) -> Dict[str, tuple]:\n return {key: helpers.numpy_to_native(getattr(self, key)) for key in attributes}",
"def __create_dir_structure_file__(self):\n # | - __create_dir_structure_file__\n\n dir_structure_data = {}\n dir_structure_data[\"tree_level_labels\"] = self.tree_level_labels\n dir_structure_data[\"level_entries_dict\"] = self.level_entries_list\n # TEMP\n dir_structure_data[\"skip_dirs\"] = self.skip_dirs_lst\n\n fle_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/dir_structure.json\",\n )\n\n with open(fle_name, \"w\") as fle:\n json.dump(dir_structure_data, fle, indent=2)\n # __|",
"def getattr(self, path, fh=None):\n\n if path != \"/\":\n raise FuseOSError(ENOENT)\n\n attrs = super(IndexView, self).getattr(path, fh)\n attrs.update({\"st_mode\": S_IFDIR | 0o555, \"st_nlink\": 2})\n\n return attrs",
"def _parse_directories(d):\n for k, v in d.items():\n if isinstance(v, abc.Mapping):\n _parse_directories(v)\n else:\n d[k] = os.path.expandvars(v)\n return d",
"def update_dirs(self, dirs: dict):\n if self.dirs is None:\n self.dirs = AttrDict(**dirs)\n else:\n for key, val in dirs.items():\n self.dirs.update({key: val})",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def __init__(self, dirname, defmode='r'):\n self.name = dirname\n self.defmode = defmode\n\n self.items = []\n\n for i in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, i)):\n self.items.append(Tree(os.path.join(dirname, i), defmode))\n\n else:\n self.items.append(open(os.path.join(dirname, i), defmode))\n\n self._dict = self.to_dict()",
"def _GetStatAttribute(self):\n stat_attribute = attribute.StatAttribute()\n stat_attribute.device_number = self._fshfs_file_entry.device_number\n stat_attribute.group_identifier = self._fshfs_file_entry.group_identifier\n stat_attribute.inode_number = self._fshfs_file_entry.identifier\n stat_attribute.mode = self._fshfs_file_entry.file_mode\n stat_attribute.number_of_links = self._fshfs_file_entry.number_of_links\n stat_attribute.owner_identifier = self._fshfs_file_entry.owner_identifier\n stat_attribute.size = self._fshfs_file_entry.size\n stat_attribute.type = self.entry_type\n\n return stat_attribute",
"def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic",
"def create_file_hash_dict(cls, file, file_path):\n\n file_info = {}\n file_info['path'] = file_path\n file_info['hash'] = cls.get_256_hash_from_file(file_path)\n file_info['type'] = 'file'\n file_info['name'] = file\n file_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return file_info",
"def a_attr_dict (self) :\n return dict (href = self.abs_href)",
"def statfs(self, path, *args, **pargs):\n path, meta = self.find_cue_path(path)\n path = self.clean_path(path)\n stv = os.statvfs(path)\n return dict((key, getattr(stv, key)) for key in (\n 'f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',\n 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax'))",
"def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d",
"def get_dir(root_dir):\n\n dir_dict = {}\n\n for item in os.scandir(root_dir):\n item_type = \"\"\n\n if item.is_file():\n item_type = \"[FILE]\"\n elif item.is_dir():\n item_type = \"[DIR]\"\n\n dir_dict[item.name] = item_type\n\n return dir_dict",
"def manifest(self):\n yield self._meta\n for dir_key, meta in self._walk_dir_meta():\n yield {'logical_key': dir_key, 'meta': meta}\n for logical_key, entry in self.walk():\n yield {'logical_key': logical_key, **entry.as_dict()}",
"def process_folder(root, path=\"\"):\n myDict = {}\n if path:\n if root.cd(path):\n for key in ROOT.gDirectory.GetListOfKeys():\n filterKey(root, key, path, myDict, \"__List\")\n else:\n for key in ROOT.gDirectory.GetListOfKeys():\n mypath = ROOT.gDirectory.GetPathStatic()\n filterKey(root, key, mypath, myDict, \"\")\n ROOT.gDirectory.cd(mypath)\n return myDict",
"def create_linkfile_dict(child):\n\n attrs = ['src', 'dest']\n values = [child.get(attr) for attr in attrs]\n\n if None in values:\n raise InvalidManifest(\n 'Missing required attribute in linkfile element'\n )\n\n return dict(zip(attrs, values))",
"def fact():\n manifests = [x for x in os.walk(manifests_dir)]\n\n return { 'manifests': manifests }",
"def get_attrib_dict(self, attribs: Tuple[str]) -> Dict[str, str]:\n attrs = self.get_attribs(attribs)\n attrs = tuple(map(lambda a: (a[0][1:], a[1]), attrs))\n return dict(attrs)"
]
| [
"0.65125257",
"0.63955706",
"0.622794",
"0.61981",
"0.5993693",
"0.59099",
"0.5832498",
"0.58114654",
"0.57562417",
"0.57512605",
"0.5694966",
"0.562954",
"0.5596386",
"0.5554438",
"0.5540832",
"0.5540053",
"0.55319875",
"0.5529663",
"0.5522666",
"0.5500827",
"0.54975873",
"0.5441542",
"0.5439093",
"0.54330117",
"0.5424873",
"0.5415135",
"0.5411886",
"0.5411842",
"0.54109496",
"0.5392993"
]
| 0.7701529 | 0 |
Try to resolve a path as a tree path within a ref of a project. | def resolve_ref_prefix(self, path):
project, remainingPath = self.resolve_project_prefix(path)
if not project:
return None, None, None
for ref in self.cache.list_project_refs(project, self.tagRefs):
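            # relative_to() raises ValueError when ref.name is not a prefix of the remaining path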
try:
treePath = remainingPath.relative_to(pathlib.Path(ref.name))
return project, ref, treePath
except ValueError:
continue
return None, None, None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve_ref(ref):\n if ref == DIRTY:\n return ref\n try:\n return git_rev_parse(ref)\n except CommandFailure:\n for remote in git_remote():\n try:\n return git_rev_parse('{remote}/{ref}'.format(**locals()))\n except CommandFailure:\n continue\n return None",
"def resolve_ref(self, path):\n\n project, ref, remainingPath = self.resolve_ref_prefix(path)\n if not ref or remainingPath.as_posix() != '.':\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REPOSITORY_DIR,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref}\n )",
"def resolve(name):\n arg = Path(name)\n return str(arg.resolve())",
"def resolve_path(self, path):\n\n return (\n self.resolve_root(path) or\n self.resolve_tree(path) or\n self.resolve_ref(path) or\n self.resolve_ref_hierarchy(path) or\n self.resolve_repository_entry(path)\n )",
"def resolve_partial_ref_prefix(self, path):\n\n project, remainingPath = self.resolve_project_prefix(path)\n if not project:\n return None, None, None\n\n refPrefix = remainingPath.as_posix() + '/'\n\n # Resolve to most recently created reference for accurate directory dates\n refs = self.cache.list_project_refs(project, self.tagRefs)\n refs = sorted(refs, key=lambda ref: -iso8601.parse_date(ref.commit['committed_date']).timestamp())\n\n for ref in refs:\n if ref.name.startswith(refPrefix):\n return project, ref, refPrefix\n\n return None, None, None",
"def resolve(self, path=''):\n path = self._sep.join([self.root] + path.split('/'))\n return realpath(path)",
"def resolve_path(self, path):\n if path:\n if path[0] == '/':\n #zope objects case\n try: return self.unrestrictedTraverse(path)\n except: pass\n else:\n #aliss (python) objects case\n try: return self.get_aliss_object(path)\n except: pass\n #case of no path\n pass",
"def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc",
"def resolvePath_(cls, path):\r\n try:\r\n fsref, isFolder, wasAliased = FSResolveAliasFile(os.path.realpath(path), 1)\r\n return os.path.abspath(fsref.as_pathname().decode(u\"utf-8\"))\r\n except MacOS.Error as e:\r\n return None",
"def resolve_ref_hierarchy(self, path):\n\n project, ref, refPrefix = self.resolve_partial_ref_prefix(path)\n if not ref:\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REF_LEVEL,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref, 'refPrefix': refPrefix}\n )",
"def resolve_reference(ref, rel):\n\n # Find out which module we should be looking in.\n modname = None\n relpath = None\n rel_parts = rel.split('.')\n for i in range(len(rel_parts), 0, -1):\n try_modname = '.'.join(rel_parts[:i])\n if idb.has_module(try_modname):\n modname = try_modname\n relpath = rel_parts[i:]\n break\n\n if not modname:\n return None\n\n refpath = ref.replace('::', '.').split('.')\n\n # Say `rel` is \"panda3d.core.NodePath.node\",\n # and `ref` is \"PandaNode.final\", then we will try these in this order:\n # - panda3d.core::NodePath.node.PandaNode.final\n # - panda3d.core::NodePath.PandaNode.final\n # - panda3d.core::PandaNode.final\n\n for i in range(len(relpath), -1, -1):\n search = relpath[:i] + refpath\n ifunc = idb.lookup_function(modname, search)\n if ifunc:\n # Grab the mangled function name.\n func_name = idb.get_function_name(ifunc, mangle=True)\n return ('meth', '.'.join(relpath[:i] + refpath[:-1] + [func_name]))\n\n itype = idb.lookup_type(modname, search)\n if itype:\n # Grab the original type name.\n type_name = idb.get_type_name(itype, mangle=False, scoped=True)\n return ('class', type_name)",
"def resolved_path(self) -> Path:\n pass",
"def resolved(path: Union[str, Path]) -> str:\n return os.path.basename(os.path.abspath(path))",
"def _dereference(self, ref_url, obj_path, recursions):\n # In order to start dereferencing anything in the referenced URL, we have\n # to read and parse it, of course.\n contents = _url.fetch_url(ref_url, self.__reference_cache, self.__encoding)\n\n # In this inner parser's specification, we can now look for the referenced\n # object.\n value = contents\n if len(obj_path) != 0:\n from prance.util.path import path_get\n try:\n value = path_get(value, obj_path)\n except (KeyError, IndexError, TypeError) as ex:\n raise _url.ResolutionError('Cannot resolve reference \"%s\": %s'\n % (ref_url.geturl(), str(ex)))\n\n # Deep copy value; we don't want to create recursive structures\n import copy\n value = copy.deepcopy(value)\n\n # Now resolve partial specs\n value = self._resolve_partial(ref_url, value, recursions)\n\n # That's it!\n return value",
"def resolve_references(path, schema):\n if isinstance(schema, dict):\n # do $ref first\n if '$ref' in schema:\n # Pull the referenced filepath from the schema\n referenced_file = schema['$ref']\n\n # Referenced filepaths are relative, so take the current path's\n # directory and append the relative, referenced path to it.\n inner_path = os.path.join(os.path.dirname(path), referenced_file)\n\n # Then convert the path (which may contiain '../') into a\n # normalised, absolute path\n inner_path = os.path.abspath(inner_path)\n\n # Load the referenced file\n ref = load_file(\"file://\" + inner_path)\n\n # Check that the references in *this* file are valid\n result = resolve_references(inner_path, ref)\n\n # They were valid, and so were the sub-references. Delete\n # the reference here to ensure we don't pass over it again\n # when checking other files\n del schema['$ref']\n else:\n result = {}\n\n for key, value in schema.items():\n result[key] = resolve_references(path, value)\n return result\n elif isinstance(schema, list):\n return [resolve_references(path, value) for value in schema]\n else:\n return schema",
"def _ref_name_from_path(self, path: str) -> str:\n prefix = \"%s/\" % self._path\n assert path.startswith(prefix)\n return path[len(prefix) :]",
"def _ref_path(self, name: str) -> str:\n assert name.startswith(\"refs/\")\n return posixpath.join(self._path, name)",
"def resolve_project_prefix(self, path):\n\n for nodePath, node in self.cache.get_tree(self.userProjects).items():\n if type(node) is gitlab.v4.objects.Project and path.startswith(nodePath):\n remainingPath = pathlib.Path(path).relative_to(pathlib.Path(nodePath))\n return node, remainingPath\n\n return None, None",
"def _resolve_file_or_none(context_dir, conf, conf_file, has_args=False):\n if not conf:\n return None\n base1 = os.path.expanduser(context_dir)\n base2 = os.path.expanduser(conf)\n path = os.path.join(base1, base2)\n path = os.path.abspath(path) # This resolves \"/../\"\n if not os.path.exists(path):\n raise Exception(\"File does not exist: '%s'. This was \"\n \"referenced in the file '%s'.\" % (path, conf_file))\n return path",
"def resolvePath(rootPath, relPath):\n\trelPath = relPath.lstrip(\"/\")\n\tfullPath = os.path.realpath(os.path.join(rootPath, relPath))\n\tif not fullPath.startswith(rootPath):\n\t\traise ValueError(\n\t\t\t\"Full path %s does not start with resource root %s\"%(fullPath, rootPath))\n\tif not os.path.exists(fullPath):\n\t\traise ValueError(\n\t\t\t\"Invalid path %s. This should not happend.\"%(fullPath))\n\treturn fullPath",
"def parse_ref(url_path):\n ref = url_path.lstrip('/')\n if not ref:\n ref = os.environ.get('DEFAULT_GIT_REF', 'HEAD').strip()\n return ref",
"def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path",
"def resolved(rpath):\r\n return realpath(abspath(rpath))",
"def resolved_path(path):\n path = os.path.abspath(path)\n elements = path_elements(path)\n result = \"\"\n for element in elements:\n segment = element\n segment_path = os.path.join(result, segment)\n if os.path.islink(segment_path):\n segment = os.readlink(segment_path)\n result = os.path.join(result, segment)\n result = os.path.normpath(result)\n return result",
"def _resolve_entry(self, path):\n upath = pycompat.fsdecode(path)\n ent = None\n if path in self._pending_changes:\n val = self._pending_changes[path]\n if val is None:\n raise KeyError\n return val\n t = self._tree\n comps = upath.split('/')\n te = self._tree\n for comp in comps[:-1]:\n te = te[comp]\n t = self._git_repo[te.id]\n ent = t[comps[-1]]\n if ent.filemode == pygit2.GIT_FILEMODE_BLOB:\n flags = b''\n elif ent.filemode == pygit2.GIT_FILEMODE_BLOB_EXECUTABLE:\n flags = b'x'\n elif ent.filemode == pygit2.GIT_FILEMODE_LINK:\n flags = b'l'\n else:\n raise ValueError('unsupported mode %s' % oct(ent.filemode))\n return ent.id.raw, flags",
"def resolve(self, path):\n with self.resolve_cache.get_or_lock(path) as (in_cache, value):\n if in_cache:\n return value\n\n try:\n absolute_path = self.client.resolve(path, **self.client_request_kwargs)['Path']\n except ipfshttpclient.exceptions.ErrorResponse:\n absolute_path = None\n\n if absolute_path is None or not absolute_path.startswith('/ipfs/'):\n self.resolve_cache[path] = None\n return None\n\n cid = absolute_path[6:] # strip '/ipfs/'\n self.resolve_cache[path] = cid\n return cid",
"def rel_resolve(path):\n if os.path.isabs(path):\n return os.path.abspath(path)\n else:\n return os.path.join(SCRIPTDIR, path)",
"def svn_client_resolve(char_path, svn_depth_t_depth, svn_wc_conflict_choice_t_conflict_choice, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def abspath(self, ref):\n \n directory, path = get_location(self.directory, ref.strip(),\n current=dirname(self.relative))\n path = join_fb_root(join(directory, path))\n return path",
"def referenced_by(self, refobj):\n try:\n ref = cmds.referenceQuery(refobj, referenceNode=True)\n return ref\n except RuntimeError as e:\n if str(e).endswith(\"' is not from a referenced file.\\n\"):\n return None\n else:\n raise e"
]
| [
"0.7151219",
"0.694133",
"0.69117504",
"0.6790696",
"0.6727154",
"0.67200303",
"0.66987157",
"0.6604326",
"0.64834887",
"0.64398366",
"0.6264806",
"0.6253427",
"0.6241761",
"0.6132966",
"0.60928077",
"0.6074949",
"0.6051343",
"0.60149103",
"0.5935395",
"0.59204805",
"0.5909341",
"0.5888107",
"0.5876223",
"0.58624446",
"0.5857919",
"0.58118033",
"0.5805566",
"0.5787191",
"0.5748635",
"0.5747336"
]
| 0.69918907 | 1 |
Try to resolve a path as a level within a hierarchical ref. | def resolve_ref_hierarchy(self, path):
project, ref, refPrefix = self.resolve_partial_ref_prefix(path)
if not ref:
return None
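        # The ref's latest commit date becomes the timestamp for this directory level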
refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()
return Entity(
EntityType.REF_LEVEL,
path,
create_directory_attributes(refTime),
{'project': project, 'ref': ref, 'refPrefix': refPrefix}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve_path(self, path):\n if path:\n if path[0] == '/':\n #zope objects case\n try: return self.unrestrictedTraverse(path)\n except: pass\n else:\n #aliss (python) objects case\n try: return self.get_aliss_object(path)\n except: pass\n #case of no path\n pass",
"def resolve_path(self, path):\n\n return (\n self.resolve_root(path) or\n self.resolve_tree(path) or\n self.resolve_ref(path) or\n self.resolve_ref_hierarchy(path) or\n self.resolve_repository_entry(path)\n )",
"def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):",
"def resolve_level(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n pl = [\".\"]\n for i in range(0, this_idx - target_idx):\n pl.append(\"..\")\n return os.path.join(*pl)",
"def _resolve_path(d, path):\n accum_value = d\n for node_key in path:\n accum_value = accum_value[node_key]\n return accum_value",
"def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc",
"def resolve_ref(self, path):\n\n project, ref, remainingPath = self.resolve_ref_prefix(path)\n if not ref or remainingPath.as_posix() != '.':\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REPOSITORY_DIR,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref}\n )",
"def resolve_ref(ref):\n if ref == DIRTY:\n return ref\n try:\n return git_rev_parse(ref)\n except CommandFailure:\n for remote in git_remote():\n try:\n return git_rev_parse('{remote}/{ref}'.format(**locals()))\n except CommandFailure:\n continue\n return None",
"def tree_lookup(self, target_path, commit):\n segments = target_path.split(\"/\")\n tree_or_blob = commit.tree\n path = ''\n while segments:\n dirent = segments.pop(0)\n if isinstance(tree_or_blob, pygit2.Tree):\n if dirent in tree_or_blob:\n tree_or_blob = self.repo[tree_or_blob[dirent].oid]\n # self.logger.debug('%s in %s' % (dirent, path))\n if path:\n path += '/'\n path += dirent\n else:\n # This is probably because we were called on a\n # commit whose parent added a new directory.\n self.logger.debug(' %s not in %s in %s' %\n (dirent, path, commit.hex[:8]))\n return None\n else:\n self.logger.debug(' %s not a tree in %s' %\n (tree_or_blob, commit.hex[:8]))\n return None\n return tree_or_blob",
"def _resolve_this(self, levels):\n if hasattr(\n sys, \"_getframe\"\n ): # implementation detail of CPython, speeds up things by 100x.\n desired_frame = sys._getframe(3)\n contracted = desired_frame.f_locals[\"self\"]\n else:\n call_frame = inspect.stack(0)[3]\n contracted = call_frame[0].f_locals[\"self\"]\n ref = contracted.ref.split(\".\")\n\n # (0=module, 1=module's parent etc.)\n level_offset = levels - 1\n traversed = self\n for i in range(len(ref) - level_offset):\n traversed = getattr(traversed, ref[i])\n return traversed",
"def resolve_reference(ref, rel):\n\n # Find out which module we should be looking in.\n modname = None\n relpath = None\n rel_parts = rel.split('.')\n for i in range(len(rel_parts), 0, -1):\n try_modname = '.'.join(rel_parts[:i])\n if idb.has_module(try_modname):\n modname = try_modname\n relpath = rel_parts[i:]\n break\n\n if not modname:\n return None\n\n refpath = ref.replace('::', '.').split('.')\n\n # Say `rel` is \"panda3d.core.NodePath.node\",\n # and `ref` is \"PandaNode.final\", then we will try these in this order:\n # - panda3d.core::NodePath.node.PandaNode.final\n # - panda3d.core::NodePath.PandaNode.final\n # - panda3d.core::PandaNode.final\n\n for i in range(len(relpath), -1, -1):\n search = relpath[:i] + refpath\n ifunc = idb.lookup_function(modname, search)\n if ifunc:\n # Grab the mangled function name.\n func_name = idb.get_function_name(ifunc, mangle=True)\n return ('meth', '.'.join(relpath[:i] + refpath[:-1] + [func_name]))\n\n itype = idb.lookup_type(modname, search)\n if itype:\n # Grab the original type name.\n type_name = idb.get_type_name(itype, mangle=False, scoped=True)\n return ('class', type_name)",
"def resolve_ref_prefix(self, path):\n\n project, remainingPath = self.resolve_project_prefix(path)\n if not project:\n return None, None, None\n\n for ref in self.cache.list_project_refs(project, self.tagRefs):\n try:\n treePath = remainingPath.relative_to(pathlib.Path(ref.name))\n return project, ref, treePath\n except ValueError:\n continue\n\n return None, None, None",
"def resolve_partial_ref_prefix(self, path):\n\n project, remainingPath = self.resolve_project_prefix(path)\n if not project:\n return None, None, None\n\n refPrefix = remainingPath.as_posix() + '/'\n\n # Resolve to most recently created reference for accurate directory dates\n refs = self.cache.list_project_refs(project, self.tagRefs)\n refs = sorted(refs, key=lambda ref: -iso8601.parse_date(ref.commit['committed_date']).timestamp())\n\n for ref in refs:\n if ref.name.startswith(refPrefix):\n return project, ref, refPrefix\n\n return None, None, None",
"def _get_from_nest(nest, path):\n if not path or not nest:\n return nest\n return _get_from_nest(nest.get(path[0], None), path[1:])",
"def svn_client_resolve(char_path, svn_depth_t_depth, svn_wc_conflict_choice_t_conflict_choice, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def resolved_path(self) -> Path:\n pass",
"def unglom(\r\n d: T_StrAnyMapping, path: str, value: typing.Any\r\n) -> T_StrAnyMapping:\r\n try:\r\n return glom.assign(d, path, value)\r\n except KeyError:\r\n parent, child = path.rsplit(\".\", 1)\r\n return unglom(d, parent, {child: value})",
"def resolve(self, path=''):\n path = self._sep.join([self.root] + path.split('/'))\n return realpath(path)",
"def path_to_step(self, path: str):\n path = path or \"\"\n tree = path.split(path_splitter)\n next_level = self\n for step in tree:\n if step not in next_level:\n raise GraphError(\n f\"step {step} doesnt exist in the graph under {next_level.fullname}\"\n )\n next_level = next_level[step]\n return next_level",
"def _look_in_package(tree: dict, module_path: str, name: str, level: Optional[int] = None) -> Union[str, None]:\n parent_path = os.path.dirname(module_path)\n if level is not None:\n for _ in range(level - 1):\n parent_path = os.path.dirname(parent_path)\n parent = find_tree(tree, lambda x, p: x[\"path\"] in [p, os.path.join(p, \"__init__.py\")], args=(parent_path,))\n if parent:\n if parent[\"fullname\"] in [name, \"{}.__init__\".format(name)]:\n return parent[\"path\"]\n for child in parent[\"children\"].values():\n if child[\"name\"] == name:\n return child[\"path\"]\n target = find_tree(tree, lambda x, f: x[\"fullname\"] == f, args=(\"{}.{}\".format(parent[\"fullname\"], name),))\n if target:\n return target[\"path\"]\n return None",
"def resolve(name):\n arg = Path(name)\n return str(arg.resolve())",
"def traverse(path, default=_RAISE_KEYERROR):",
"def follow_path(self, path):\n subtree = self\n for indication in path:\n if indication == 'left':\n subtree = subtree.left_subtree\n else:\n subtree = subtree.right_subtree\n \n return subtree",
"def traverse(name, furtherPath):",
"def check_traverse_and_set_context(self, key, node):\n if key != \"$ref\":\n return False, None\n\n if node.startswith(\"#/\"): # local reference\n try:\n is_local_ref = finddict(self.openapi, fragment_to_keys(node))\n except KeyError:\n is_local_ref = False\n\n # Don't resolve local references already in the spec.\n if is_local_ref:\n return False, None\n # Resolve local references in external files.\n if self.context:\n return True, None\n\n return False, None\n\n if node.startswith(\"http\"): # url reference\n host, fragment = urldefrag(node)\n return True, host\n\n if node.startswith(\"file://\"):\n raise NotImplementedError\n\n host, fragment = urldefrag(node)\n if self.context:\n if self.context.startswith(\"http\"):\n p = urljoin(self.context, host)\n # log.info(f\"trying to set context {p}. Was {self.context}. host is: {host}.\")\n return True, p\n\n p = Path(self.context).parent.joinpath(host)\n # log.info(f\"trying to set context {p}. Was {self.context}. host is: {host}. resolved is {p.resolve()}\")\n if p.is_file():\n return True, str(p.resolve())\n else:\n log.warning(\"can't set context %r. Retains %r\", p, self.context)\n\n # Remote reference should use previous\n # context. Better should be to track\n # nodes with their context.\n return True, None",
"def expand_path(path, start_path, base_depth):\n path = path.replace(start_path, '')\n path = os.path.normpath(path)\n pathdepth = path_depth(path)\n path = path.replace('../', '', base_depth)\n if base_depth > pathdepth:\n for _ in range(base_depth - path_depth(path)):\n path = '_/' + path\n return path",
"def modify_feature_reference(feature,path,ref):\n current_fs = feature\n # Only go (n-1) steps, where n is the length of the path\n for node in path[:-1]:\n if current_fs.has_key(node):\n current_fs = current_fs[node]\n else:\n raise KeyError('No such node %s in the feature structure' % (node))\n\n current_fs[path[-1]] = ref # Change the reference\n \n return",
"def resolve_references(path, schema):\n if isinstance(schema, dict):\n # do $ref first\n if '$ref' in schema:\n # Pull the referenced filepath from the schema\n referenced_file = schema['$ref']\n\n # Referenced filepaths are relative, so take the current path's\n # directory and append the relative, referenced path to it.\n inner_path = os.path.join(os.path.dirname(path), referenced_file)\n\n # Then convert the path (which may contiain '../') into a\n # normalised, absolute path\n inner_path = os.path.abspath(inner_path)\n\n # Load the referenced file\n ref = load_file(\"file://\" + inner_path)\n\n # Check that the references in *this* file are valid\n result = resolve_references(inner_path, ref)\n\n # They were valid, and so were the sub-references. Delete\n # the reference here to ensure we don't pass over it again\n # when checking other files\n del schema['$ref']\n else:\n result = {}\n\n for key, value in schema.items():\n result[key] = resolve_references(path, value)\n return result\n elif isinstance(schema, list):\n return [resolve_references(path, value) for value in schema]\n else:\n return schema",
"def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def acyclic_sub_path(tree, path):\n for u, v in pairwise(reversed(path)):\n if v in tree.nodes and u not in tree.nodes:\n return path[path.index(v):]"
]
| [
"0.68845683",
"0.6494105",
"0.64544624",
"0.611602",
"0.60794085",
"0.60687786",
"0.58192456",
"0.57265294",
"0.5710625",
"0.5643911",
"0.5628097",
"0.5625242",
"0.5588643",
"0.5578107",
"0.5544796",
"0.54888475",
"0.54879767",
"0.5432573",
"0.5405383",
"0.54042643",
"0.5394262",
"0.538477",
"0.5375918",
"0.5374315",
"0.53482836",
"0.5347182",
"0.5290983",
"0.52870595",
"0.52834487",
"0.52799314"
]
| 0.71775824 | 0 |
List the first level of refs of a project. If the project contains hierarchical refs then only the first level of those is returned. For example, a repository containing the branches "master", "feature/abc" and "feature/def" will have this function return the list ["master", "feature"]. | def list_project_refs(self, entity):
refs = []
for ref in self.cache.list_project_refs(entity.objects['project'], self.tagRefs):
# If ref name is hierarchical then only return first level
if '/' in ref.name:
refs.append(ref.name.split('/')[0])
else:
refs.append(ref.name)
# Refs may contain duplicates if the same prefix occurs multiple times
return list(set(refs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_project_ref_hierarchy(self, entity):\n\n refs = []\n\n for ref in self.cache.list_project_refs(entity.objects['project'], self.tagRefs):\n if ref.name.startswith(entity.objects['refPrefix']):\n remainingRefName = pathlib.Path(ref.name).relative_to(pathlib.Path(entity.objects['refPrefix'])).parts[0]\n refs.append(remainingRefName)\n\n return refs",
"async def get_tree(\n self, ref: str or None = None\n ) -> [\"AIOGitHubAPIRepositoryTreeContent\"] or list:\n if ref is None:\n raise AIOGitHubAPIException(\"Missing ref\")\n _endpoint = f\"/repos/{self.full_name}/git/trees/{ref}\"\n _params = {\"recursive\": \"1\"}\n\n response = await self.client.get(endpoint=_endpoint, params=_params)\n\n return [\n AIOGitHubAPIRepositoryTreeContent(x, self.full_name, ref)\n for x in response.get(\"tree\", [])\n ]",
"def branch_list(repo, remote_name, pattern=None):\n # The return string for a remote reference is a single line with two\n # fields separated by a tab string. The first field is a commit hash.\n # The second field is the reference path. The unique part of the path\n # is the last field.\n #\n # 423f434cd877926ff47f3a710a7b0c414785515e\trefs/heads/enterprise-3.0\n\n lines = repo.git.ls_remote(remote_name, pattern, heads=True).split(\"\\n\")\n return [str(line.split('/')[-1]) for line in lines]",
"def get_refs(self, for_push: bool) -> List[Tuple[str, str]]:\n try:\n loc = posixpath.join(self._path, \"refs\")\n res = self._connection.files_list_folder(loc, recursive=True)\n files = res.entries\n while res.has_more:\n res = self._connection.files_list_folder_continue(res.cursor)\n files.extend(res.entries)\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.ListFolderError):\n raise\n if not for_push:\n # if we're pushing, it's okay if nothing exists beforehand,\n # but it's good to notify the user just in case\n self._trace(\"repository is empty\", Level.INFO)\n else:\n self._first_push = True\n return []\n files = [i for i in files if isinstance(i, dropbox.files.FileMetadata)]\n paths = [i.path_lower for i in files]\n if not paths:\n return []\n revs: List[str] = []\n data: List[bytes] = []\n for rev, datum in self._get_files(paths):\n revs.append(rev)\n data.append(datum)\n refs = []\n for path, rev, datum in zip(paths, revs, data):\n name = self._ref_name_from_path(path)\n sha = datum.decode(\"utf8\").strip()\n self._refs[name] = (rev, sha)\n refs.append((sha, name))\n return refs",
"async def get_tree(repository, ref):\n try:\n tree = await repository.get_tree(ref)\n return tree\n except AIOGitHubException as exception:\n raise HacsException(exception)",
"def refs(self):\n p = Popen(['git', 'show-ref', '--no-head'], cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_id, refname = line.split()\n yield (CommitId(commit_id), refname)",
"def resolve_ref_prefix(self, path):\n\n project, remainingPath = self.resolve_project_prefix(path)\n if not project:\n return None, None, None\n\n for ref in self.cache.list_project_refs(project, self.tagRefs):\n try:\n treePath = remainingPath.relative_to(pathlib.Path(ref.name))\n return project, ref, treePath\n except ValueError:\n continue\n\n return None, None, None",
"def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }",
"def branches(self):\n return sorted([\n br[20:] for br in self.repo.refs.keys() if (\n br.startswith('refs/remotes/origin/') and\n br[20:] != 'HEAD'\n )\n ])",
"def HeadList(self):\n return [(rname, repo.currenthead) for rname, repo in self.repos.items()\n ]",
"def branches(tree):\n return tree[1:]",
"def branches(tree):\n\n return tree[1:]",
"def listchain(self, rootfirst=False): \n l = [self]\n while 1: \n x = l[-1]\n if x.parent is not None: \n l.append(x.parent) \n else: \n if not rootfirst:\n l.reverse() \n return l",
"def __gitBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False)",
"def get_branch_list(request, project_id):\n if request.method == 'GET':\n project_entry = GitProjectEntry.objects.filter(id=project_id).first()\n if project_entry is None:\n return res.get_response(404, 'project not found', {})\n\n branch_entries = GitBranchEntry.objects.filter(project=project_entry)\n\n branches = []\n for entry in branch_entries:\n obj = entry.as_object()\n merge_target_entry = GitBranchMergeTargetEntry.objects.filter(\n project=project_entry,\n current_branch=entry\n ).first()\n\n if merge_target_entry is not None:\n obj['target_branch_name'] = merge_target_entry.target_branch.name\n\n branches.append(obj)\n\n return res.get_response(200, '', branches)\n\n return res.get_only_get_allowed({})",
"def parents(rev):\n return (\n subprocess.check_output([\"git\", \"rev-list\", \"-n\", \"1\", \"--parents\", rev])\n .decode()\n .strip()\n .split(\" \")[1:]\n )",
"def resolve_partial_ref_prefix(self, path):\n\n project, remainingPath = self.resolve_project_prefix(path)\n if not project:\n return None, None, None\n\n refPrefix = remainingPath.as_posix() + '/'\n\n # Resolve to most recently created reference for accurate directory dates\n refs = self.cache.list_project_refs(project, self.tagRefs)\n refs = sorted(refs, key=lambda ref: -iso8601.parse_date(ref.commit['committed_date']).timestamp())\n\n for ref in refs:\n if ref.name.startswith(refPrefix):\n return project, ref, refPrefix\n\n return None, None, None",
"def find_branches(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n branches = []\n for branch, branch_id in [(b, ref_dict[b]) for b in repo.branches]:\n obj = repo.repo[branch_id]\n if commit.id == obj.id:\n branches.append((branch, obj))\n return branches",
"def _find_true_parent(repo, head, branch_dict, work_tree):\n if not os.path.exists('.git'):\n # repository not yet initialized\n return head\n branch_names = set()\n # Find all non-deleted branches that Git already knows about...\n for branch in branch_dict.values():\n if branch.git_branch_name and not branch.deleted:\n if repo.lookup_branch(branch.git_branch_name):\n branch_names.add(branch.git_branch_name)\n # ...excluding the branch that is being introduced\n branch_names.discard(head.git_branch_name())\n # Turn all of those into exclusions for git-rev-list\n not_branches = ['^{}'.format(br) for br in branch_names]\n cmd = ['git', 'rev-list', '--date-order', '--parents'] + not_branches\n # Start git-rev-list from the new SHA1 that is being introduced.\n cmd.append(head.new_sha1)\n cwd = os.getcwd()\n os.chdir(work_tree)\n # Initialize p4gf_proc now that we've changed the cwd to the git repo\n # (we lack the functionality to change the cwd after the fact).\n p4gf_proc.init()\n result = p4gf_proc.popen(cmd)\n os.chdir(cwd)\n output = result['out'].strip()\n LOG.debug(\"_find_true_parent() output: %s\", output)\n if len(output) == 0:\n return head\n # Extract the last SHA1 from the git-rev-list output, that is the true\n # parent of this new branch.\n sha1s = output[output.rfind('\\n')+1:].split()\n LOG.debug(\"_find_true_parent() first parents: %s\", sha1s)\n parent_sha1 = sha1s[1] if len(sha1s) > 1 else sha1s[0]\n return PreReceiveTuple(parent_sha1, head.new_sha1, head.ref)",
"def get_branches(self, *, refs=[\"refs/heads\", \"refs/remotes\"]):\n # type: (Sequence[str]) -> List[Branch]\n stdout = self.git(\n \"for-each-ref\",\n (\n \"--format=\"\n \"%(HEAD)%00\"\n \"%(refname)%00\"\n \"%(upstream)%00\"\n \"%(upstream:remotename)%00\"\n \"%(upstream:track,nobracket)%00\"\n \"%(committerdate:unix)%00\"\n \"%(objectname)%00\"\n \"%(contents:subject)\"\n ),\n *refs\n ) # type: str\n branches = [\n branch\n for branch in (\n self._parse_branch_line(line)\n for line in filter_(stdout.splitlines())\n )\n if branch.name != \"HEAD\"\n ]\n store.update_state(self.repo_path, {\"branches\": branches})\n return branches",
"def get_reference_node_parents(ref):\n parents = []\n return parents",
"def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])",
"def get_git_branch(pkg):\n git_folder = os.path.join(GIT_FOLDER, '%s.git' % pkg)\n if not os.path.exists(git_folder):\n print 'Could not find %s' % git_folder\n return set()\n\n head_folder = os.path.join(git_folder, 'refs', 'heads')\n return set(os.listdir(head_folder))",
"def depthFirstSearch(problem):\n\n\n no = problem.getStartState()\n if (problem.isGoalState(no)):\n return []\n \n pilha = util.Stack()\n pilha.push((no, []))\n \n explorados = []\n \n while not pilha.isEmpty():\n (no, caminho) = pilha.pop()\n \n if problem.isGoalState(no):\n return caminho\n \n explorados.append(no)\n for filho in problem.getSuccessors(no):\n if (filho[0] not in explorados):\n pilha.push((filho[0], caminho + [filho[1]]))\n\n return []",
"def format_project_ref_string(repo_path):\n\n repo = ProjectRepo(repo_path, __project_repo_default_remote__, __project_repo_default_ref__)\n local_commits = repo.new_commits()\n upstream_commits = repo.new_commits(upstream=True)\n no_local_commits = local_commits == 0 or local_commits == '0'\n no_upstream_commits = upstream_commits == 0 or upstream_commits == '0'\n if no_local_commits and no_upstream_commits:\n status = ''\n else:\n local_commits_output = colored('+' + str(local_commits), 'yellow')\n upstream_commits_output = colored('-' + str(upstream_commits), 'red')\n status = '[' + local_commits_output + '/' + upstream_commits_output + ']'\n\n if repo.is_detached():\n current_ref = repo.sha(short=True)\n return colored('(HEAD @ ' + current_ref + ')', 'magenta')\n current_branch = repo.current_branch()\n return colored('(' + current_branch + ')', 'magenta') + status",
"def depth_first_recurse(root_elem):\r\n\tif root_elem is None:\r\n\t\treturn []\r\n\tleft_elements = depth_first_recurse(root_elem.left)\r\n\tright_elements = depth_first_recurse(root_elem.right)\r\n\treturn [root_elem.value, *left_elements, *right_elements]",
"def resolve_ref_hierarchy(self, path):\n\n project, ref, refPrefix = self.resolve_partial_ref_prefix(path)\n if not ref:\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REF_LEVEL,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref, 'refPrefix': refPrefix}\n )",
"def get(self, project_slug):\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n\n return [\n dict(name=job.git_branch)\n for job\n in (\n project.jobs.distinct(Job.git_branch)\n .order_by(sqlalchemy.asc(Job.git_branch))\n )\n if job.git_branch is not None\n ]",
"def git_ls_tree(branch: str = 'main'):\n branch = quote(branch)\n return f\"git ls-tree -r {branch} --name-only\"",
"async def refs(self, user, repo):\n ref_types = (\"branches\", \"tags\")\n ref_data = [None, None]\n\n for i, ref_type in enumerate(ref_types):\n with self.catch_client_error():\n response = await getattr(self.github_client, \"get_%s\" % ref_type)(\n user, repo\n )\n ref_data[i] = json.loads(response_text(response))\n\n return ref_data"
]
| [
"0.71141195",
"0.6230946",
"0.6007423",
"0.5812723",
"0.57256657",
"0.56375396",
"0.5635291",
"0.5497794",
"0.54817456",
"0.5463489",
"0.54529834",
"0.5440121",
"0.5424686",
"0.54045993",
"0.53898203",
"0.5383756",
"0.53767204",
"0.5367718",
"0.53592324",
"0.53535324",
"0.53532714",
"0.5313267",
"0.5308376",
"0.5302763",
"0.5251782",
"0.5246923",
"0.52057946",
"0.5176826",
"0.5170091",
"0.5150934"
]
| 0.7210496 | 0 |
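
A minimal, self-contained sketch of the same first-level ref extraction, assuming plain ref-name strings instead of the cache/ref objects used by list_project_refs above (first_level_refs is a hypothetical helper, not part of the original code):

# Sketch: first path component of each ref name, deduplicated.
def first_level_refs(ref_names):
    refs = []
    for name in ref_names:
        # Hierarchical refs contribute only their first level.
        refs.append(name.split('/')[0] if '/' in name else name)
    # Duplicates appear when the same prefix occurs multiple times.
    return list(set(refs))

if __name__ == '__main__':
    print(sorted(first_level_refs(["master", "feature/abc", "feature/def"])))
    # ['feature', 'master']
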
List the files and directories in a repository subdirectory. | def list_repository_directory(self, entity):
members = []
# There is no directory object if this is the repository root
path = ''
if 'directory' in entity.objects:
path = entity.objects['directory']['path']
for entry in self.cache.get_repository_tree(entity.objects['project'], entity.objects['ref'], path):
if entry['type'] in ('blob', 'tree'):
members.append(entry['name'])
return members | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_dir(self, path):",
"def listdir(self, subdir=''):\n\n try:\n subdir = subdir.decode()\n except AttributeError:\n pass\n subdir = subdir.rstrip('\\\\')\n # cmd = '\"%s\" \"%s\" 0 ' % (self.ndc_path, self.filename)\n cmd = [\n self.ndc_path,\n self.filename,\n '0'\n ]\n if subdir:\n cmd.append(subdir)\n # cmd += '\"%s\"' % subdir\n\n logging.info(cmd)\n try:\n result = check_output(cmd)\n except CalledProcessError:\n raise FileNotFoundError('Subdirectory not found in disk', [])\n\n result = [r.split(b'\\t') for r in result.split(b'\\r\\n')]\n result = list(filter(lambda x: len(x) == 4, result))\n\n filenames = []\n subdirs = []\n for r in result:\n try:\n decoded = r[0].decode('shift_jis')\n if r[2] != b'<DIR>':\n filenames.append(decoded)\n elif r[2] == b'<DIR>' and len(r[0].strip(b'.')) > 0:\n subdirs.append(decoded)\n except UnicodeDecodeError:\n logging.info(\"Couldn't decode one of the strings in the folder: %s\" % subdir)\n continue\n\n return filenames, subdirs",
"def list_directory(project_tree, directory):\n _, subdirs, subfiles = next(project_tree.walk(directory.path))\n return DirectoryListing(directory,\n [Path(join(directory.path, subdir)) for subdir in subdirs\n if not subdir.startswith('.')],\n [Path(join(directory.path, subfile)) for subfile in subfiles])",
"def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)",
"def svn_fs_dir_entries(*args):\r\n return _fs.svn_fs_dir_entries(*args)",
"def repositories_in_folder(path):\n for thing in os.listdir(path):\n thing = os.path.join(path, thing)\n if os.path.isdir(thing):\n # Is it a repository?\n if repo(thing):\n yield thing\n else:\n yield from repositories_in_folder(thing)\n elif repo(thing):\n yield thing",
"def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)",
"def listdir(self, path=None, recursive=False):\n actual = self.actual(path, recursive)\n if not actual:\n print('No files or directories found.')\n for n in actual:\n print(n)",
"def listdir(self, path):\n return os.listdir(path)",
"def list_directory(self, path):\n dirent = self.lookup(path)\n if dirent and dirent.is_directory():\n best_fit = self.retrieve_catalog_for_path(path)\n return best_fit.list_directory(path)",
"def _RecursiveDirectoryListing(dirpath):\n result = []\n for root, _, files in os.walk(dirpath):\n for f in files:\n result.append(os.path.relpath(os.path.join(root, f), dirpath))\n return result",
"def directory_contents(self, commit, path):\n\n tree = self._get_tree(commit, path)\n return [c[0] for c in tree]",
"def listdir(self):\n if self._isurl(self._baseurl):\n raise NotImplementedError(\n \"Directory listing of URLs, not supported yet.\")\n else:\n return os.listdir(self._baseurl)",
"def list_directory(self, path):\n try:\n list = os.listdir(path)\n except OSError:\n self.send_error(\n HTTPStatus.NOT_FOUND,\n \"No permission to list directory\")\n return None\n list.sort(key=lambda a: a.lower())\n logging.debug(\"Listing directory %s\" % list)\n r = []\n try:\n displaypath = urllib.parse.unquote(self.path,\n errors='surrogatepass')\n except UnicodeDecodeError:\n displaypath = urllib.parse.unquote(path)\n displaypath = escape(displaypath)\n enc = getfilesystemencoding()\n title = 'Directory listing for %s' % displaypath\n r.append('<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" '\n '\"http://www.w3.org/TR/html4/strict.dtd\">')\n r.append('<html>\\n<head>')\n r.append('<meta http-equiv=\"Content-Type\" '\n 'content=\"text/html; charset=%s\">' % enc)\n r.append('<title>%s</title>\\n</head>' % title)\n r.append('<body>\\n<h1>%s</h1>' % title)\n r.append('<hr>\\n<ul>')\n for name in list:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = name + \"@\"\n # Note: a link to a directory displays with @ and links with /\n r.append('<li><a href=\"%s\">%s</a></li>'\n % (urllib.parse.quote(linkname,\n errors='surrogatepass'),\n escape(displayname)))\n r.append('</ul>\\n<hr>\\n</body>\\n</html>\\n')\n encoded = '\\n'.join(r).encode(enc, 'surrogateescape')\n f = BytesIO()\n f.write(encoded)\n f.seek(0)\n self.send_response(HTTPStatus.OK)\n # show that we allow range requests\n self.send_header(\"Accept-Ranges\", \"bytes\")\n self.send_header(\"Content-Type\", \"text/html; charset=%s\" % enc)\n self.send_header(\"Content-Length\", str(len(encoded)))\n self.send_cors_headers()\n self.end_headers()\n return f",
"def dirlist(request):\n r = ['<ul class=\"jqueryFileTree\" style=\"display: none;\">']\n try:\n r = ['<ul class=\"jqueryFileTree\" style=\"display: none;\">']\n directory_path = urllib.unquote(request.POST.get('dir', TEMP_DIR))\n directory = directory_path[directory_path.rfind(\"\\\\\"):]\n while directory.startswith(\"\\\\\") or directory.startswith(\"/\"):\n directory = directory[1:]\n directory_path = unicode(os.path.join(PROJECTS_PATH,\n directory_path[directory_path.find(\n 'Projects') + 9:]))\n directory_path = directory_path.replace('\\\\', os.sep).replace('/',\n os.sep)\n if os.name == 'nt':\n directory_path = r'\\\\' + directory_path\n for file in os.listdir(directory_path):\n filepath = os.path.join(directory_path, file)\n if os.path.isdir(filepath):\n r.append('<li class=\"directory_path collapsed\"><a href=\"#\"'\n ' rel=\"%s/\">%s</a></li>' % (filepath, file))\n else:\n ext = os.path.splitext(file)[1][1:] # get .ext and remove dot\n r.append('<li class=\"file ext_%s\">'\n '<a href=\"#\" rel=\"%s\" draggable=\"true\"'\n ' ondragstart=\"drag(event)\">%s</a></li>' % (ext,\n filepath,\n file))\n r.append('</ul>')\n except Exception, ext:\n r.append('Could not load directory_path(%s): %s' % (directory_path,\n str(ext)))\n r.append('</ul>')\n return HttpResponse(''.join(r))",
"def list(self):\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n print(os.path.join(root, name))",
"def getImmediateSubdirectories(dir):",
"def list_sub(location=''):\n if location != '':\n pathloc = os.path.join(os.getcwd(), location)\n else:\n pathloc = os.getcwd()\n\n print(pathloc)\n\n directory_contents = os.listdir(pathloc)\n sub_directories = []\n for item in directory_contents:\n # list directories\n if os.path.isdir(os.path.join(pathloc, item)):\n sub_directories.append(item)\n sub_directories.sort()\n return sub_directories",
"def _subdirectories(self):\n for o in os.listdir(self.directory):\n if os.path.isdir(os.path.join(self.directory, o)):\n yield os.path.join(self.directory, o)",
"def listdir(self, path: bytes) -> List[bytes]:\n directories, files = self.storage.listdir(path.decode())\n return (\n [b\".\", b\"..\"]\n + [name.encode() for name in directories if name]\n + [name.encode() for name in files if name]\n )",
"def ls(path):\n # Normalise the path\n path = path.strip(\"/\")\n path_depth = len(path.split(\"/\"))\n if not path:\n path_depth = 0\n # Get the set of files/dirs as a dict (name: attrs)\n files = {}\n for entry_path, entry_attrs in config.index.files(path_glob=\"%s*\" % path).items():\n entry_path_parts = entry_path.split(\"/\")\n # Is it a file at our level?\n if len(entry_path_parts) - 1 == path_depth:\n files[entry_path_parts[-1]] = entry_attrs\n # Is it an implicit directory\n else:\n files[entry_path_parts[path_depth]] = {\"size\": \"dir\"}\n # Print the resulting table\n print_files(files)",
"def list_directory(self, path):\n try:\n list = os.listdir(path)\n except os.error:\n self.send_error(404, \"No permission to list directory\")\n return None\n list.sort(key=lambda a: a.lower())\n list = ['..'] + list\n r = []\n displaypath = cgi.escape(urllib.parse.unquote(self.path))\n try:\n displaypath = urllib.parse.unquote(self.path,\n errors='surrogatepass')\n except UnicodeDecodeError:\n displaypath = urllib.parse.unquote(path)\n displaypath = html.escape(displaypath, quote=False)\n enc = sys.getfilesystemencoding()\n r.append('<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">')\n r.append(\"<html>\\n<title>Directory listing for %s</title>\\n\" % displaypath)\n r.append(\"<body>\\n<h2>Directory listing for %s</h2>\\n\" % displaypath)\n r.append(\"<hr>\\n\")\n r.append(\"<form ENCTYPE=\\\"multipart/form-data\\\" method=\\\"post\\\">\")\n r.append(\"<input name=\\\"file\\\" type=\\\"file\\\"/>\")\n r.append(\"<input type=\\\"submit\\\" value=\\\"upload\\\"/></form>\\n\")\n r.append(\"<hr>\\n<ul>\\n\")\n for name in list:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = name + \"@\"\n r.append('<li><a href=\"%s\">%s</a>\\n' % (urllib.parse.quote(linkname),\n html.escape(displayname)))\n r.append(\"</ul>\\n<hr>\\n</body>\\n</html>\\n\")\n encoded = '\\n'.join(r).encode(enc, 'surrogateescape')\n f = BytesIO()\n f.write(encoded)\n f.seek(0)\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html; charset=%s\" % enc)\n self.send_header(\"Content-Length\", str(len(encoded)))\n self.end_headers()\n return f",
"def subdir(self):",
"def listDir(path):\n filenames = []\n for root, dirs, files in os.walk(path):\n for i in files:\n filenames.append(os.path.join(root, i))\n return filenames",
"def list_contents(reader: UFOReader) -> list[str]:\n return reader.getImageDirectoryListing() # type: ignore",
"def list_repo_files(self, repo):\n # check for permissions\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'read')\n\n # make a directory for files, if it doesn't already exist\n repo_dir = DataHubManager.create_user_data_folder(self.repo_base, repo)\n\n uploaded_files = sorted([f for f in os.listdir(repo_dir)])\n return uploaded_files",
"def _load_files_from_repository(self) -> typing.List[upload.File]:\n with tempfile.TemporaryDirectory() as tempdir:\n self._execute_command(\n args=['git', 'clone', self.repository_folder, '.'],\n cwd=tempdir,\n )\n to_return = []\n # remove git internal files\n shutil.rmtree(pathlib.Path(tempdir) / '.git')\n for root, _, files in os.walk(tempdir):\n for file in files:\n upload_file = self._load_file(file, root, tempdir)\n to_return.append(upload_file)\n return to_return",
"def listFiles(path):\n outputList = []\n for root, dirs, files in os.walk(path):\n for f in files:\n outputList.append('/'.join([root, f]))\n return outputList",
"def print_directory_contents(path):\n if os.path.isdir(path):\n children = os.listdir(path)\n for child in children:\n child_path = os.path.join(path, child)\n print_directory_contents(child_path)\n else:\n print(path)\n directories.append(path)\n\n return directories",
"def list_directory(self, path):\n try:\n list = os.listdir(path)\n except os.error:\n self.send_error(404, \"No permission to list directory\")\n return None\n list.sort(key=lambda a: a.lower())\n f = StringIO()\n displaypath = cgi.escape(unquote(self.path))\n f.write('<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">')\n f.write(\"<html>\\n<title>Directory listing for %s</title>\\n\" % displaypath)\n f.write(\"<body>\\n<h2>Directory listing for %s</h2>\\n\" % displaypath)\n f.write(\"<hr>\\n<ul>\\n\")\n for name in list:\n fullname = os.path.join(path, name)\n displayname = linkname = name\n # Append / for directories or @ for symbolic links\n if os.path.isdir(fullname):\n displayname = name + \"/\"\n linkname = name + \"/\"\n if os.path.islink(fullname):\n displayname = name + \"@\"\n # Note: a link to a directory displays with @ and links with /\n f.write('<li><a href=\"%s\">%s</a>\\n'\n % (quote(linkname), cgi.escape(displayname)))\n f.write(\"</ul>\\n<hr>\\n</body>\\n</html>\\n\")\n length = f.tell()\n f.seek(0)\n self.send_response(200)\n encoding = sys.getfilesystemencoding()\n self.send_header(\"Content-type\", \"text/html; charset=%s\" % encoding)\n self.send_header(\"Content-Length\", str(length))\n self.end_headers()\n return f"
]
| [
"0.72475857",
"0.6898686",
"0.679878",
"0.6662444",
"0.6644059",
"0.6518236",
"0.650587",
"0.65055865",
"0.6437218",
"0.6381191",
"0.63552994",
"0.6319831",
"0.6308276",
"0.6297127",
"0.6295903",
"0.6278944",
"0.62660915",
"0.62306625",
"0.62177086",
"0.61986166",
"0.6197154",
"0.61710024",
"0.6169891",
"0.61492574",
"0.6142629",
"0.6141572",
"0.6141234",
"0.61382866",
"0.6134087",
"0.61312824"
]
| 0.7076131 | 1 |
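
A minimal sketch of the blob/tree filtering performed by list_repository_directory, assuming the tree entries are already available as plain dicts rather than GitLab API objects (directory_members is a hypothetical helper):

# Sketch: keep only regular files (blobs) and subdirectories (trees).
def directory_members(tree_entries):
    members = []
    for entry in tree_entries:
        if entry['type'] in ('blob', 'tree'):
            members.append(entry['name'])
    return members

if __name__ == '__main__':
    tree = [
        {'name': 'README.md', 'type': 'blob'},
        {'name': 'src', 'type': 'tree'},
        {'name': 'vendor', 'type': 'commit'},  # submodule entries are skipped
    ]
    print(directory_members(tree))  # ['README.md', 'src']
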
Get the position of a given number. | def __getpos__(self, num):
return self.num_to_pos[num] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_position(self, number):\n for rowidx, row in enumerate(self.numbers):\n for colidx, num in enumerate(row):\n if num == number:\n return rowidx, colidx",
"def find_next_number(line, pos=0):\n m = number_re.search(line[pos:])\n if m:\n span = m.span()\n return (span[0]+pos,span[1]+pos)",
"def pos_number(self):\n return self._pos_number.zfill(2)",
"def index(self, pos):\n for i, n in enumerate(self):\n if i == pos: return n\n raise Exception('Index out of bounds.')",
"def getPosition(i, j):\n i += 1\n j += 1\n if i < j:\n i, j = j, i\n num = (i * (i - 1) / 2) + j\n num = int(num)\n return num - 1",
"def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index",
"def get_position(self, position):",
"def get(self, position):\n return self.numbers[position[0]][position[1]]",
"def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx",
"def index_from_position_tuple(self, position):\n x = self.base_values.index(position[0])\n y = self.base_values.index(position[1])\n return y * self.size + x",
"def get_position(k):\r\n l = get_level(k)\r\n return (l, k - 2**l)",
"def locate_number(\n pipeline: Pipeline,\n num: int,\n img: ImageBGR,\n) -> Optional[Tuple[int, int]]:\n\n box = locate_front_facing_text(pipeline, str(num), img)\n\n if box is None:\n return None\n\n (cx, cy) = center_of_box(box)\n\n return (round(cx), round(cy))",
"def position_to_index(self, position, grid_size):\n x, y = position\n return x * grid_size + y",
"def position_to_index(position, grid_size):\n return position[0]*grid_size+position[1]",
"def _get_nearest_point(self, position):\n nearest_inds = np.round(position / self._maze.grid_side - 0.5)\n return nearest_inds.astype(int)",
"def get_position(self): # maybe encoded in filepath at some point\n result = (self.iter * self.row_step)% self.row_size, self.iter // (self.row_size * self.row_step)* self.col_step\n self.iter += 1\n return result",
"def get_position(self) -> Tuple[int]:\n return self.position.copy()",
"def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1",
"def scanner_pos_at_round( layer, round_num ):\n\tpos = round_num % ( layer.range * 2 - 2 )\n\treturn pos",
"def position_to_index(position, size):\n row, column = position\n return row + (size[1] + 1) * (size[0] - column - 1)",
"def get_digit(self, position):\n return self.complete_address[position - 1]",
"def get_position(self):\n fingers = self._finger\n\n if fingers[1] and not (fingers[2]) and not fingers[3] and not fingers[4]:\n return 1\n elif fingers[1] and (fingers[2]) and not fingers[3] and not fingers[4]:\n return 2\n elif fingers[1] and (fingers[2]) and fingers[3] and not fingers[4]:\n return 3\n elif fingers[1] and (fingers[2]) and fingers[3] and fingers[4]:\n return 4\n return -1",
"def __get_position(self, value):\r\n if len(self.__matrix) > 5:\r\n number = self.AminoAcids()\r\n else:\r\n number = self.Bases()\r\n\r\n if value.upper() == self.A:\r\n return number.A\r\n\r\n elif value.upper() == self.R:\r\n return number.R\r\n\r\n elif value.upper() == self.N:\r\n return number.N\r\n\r\n elif value.upper() == self.D:\r\n return number.D\r\n\r\n elif value.upper() == self.C:\r\n return number.C\r\n\r\n elif value.upper() == self.Q:\r\n return number.Q\r\n\r\n elif value.upper() == self.E:\r\n return number.E\r\n\r\n elif value.upper() == self.G:\r\n return number.G\r\n\r\n elif value.upper() == self.H:\r\n return number.H\r\n\r\n elif value.upper() == self.I:\r\n return number.I\r\n\r\n elif value.upper() == self.L:\r\n return number.L\r\n\r\n elif value.upper() == self.K:\r\n return number.K\r\n\r\n elif value.upper() == self.M:\r\n return number.M\r\n\r\n elif value.upper() == self.F:\r\n return number.F\r\n\r\n elif value.upper() == self.P:\r\n return number.P\r\n\r\n elif value.upper() == self.S:\r\n return number.S\r\n\r\n elif value.upper() == self.T:\r\n return number.T\r\n\r\n elif value.upper() == self.W:\r\n return number.W\r\n\r\n elif value.upper() == self.Y:\r\n return number.Y\r\n\r\n elif value.upper() == self.V:\r\n return number.V\r\n\r\n else:\r\n return number.Star",
"def GetTileIndex(self, pos):\r\n #pixel = rpg_image.GetPixel(self.image, pos)\r\n try:\r\n pixel = self.image_buffer[pos[0]][pos[1]]\r\n except IndexError, e:\r\n pixel = -1\r\n \r\n return pixel",
"def index_to_position(self, index):\n col = index % self._grid_size\n row = index // self._grid_size\n return row, col",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def __getnum__(self, i, j):\n return self.pos_to_num[(i, j)]",
"def getPionID(self, posisi):\n for idx in range(len(self.arrayPion)):\n if (self.arrayPion[idx].currentPosition == posisi):\n return idx\n return -1"
]
| [
"0.7684351",
"0.7131418",
"0.700196",
"0.69044185",
"0.6890738",
"0.67569715",
"0.6752947",
"0.66520596",
"0.66395706",
"0.66072875",
"0.65305483",
"0.6527862",
"0.6512868",
"0.6511938",
"0.64501953",
"0.6410245",
"0.64076304",
"0.6384556",
"0.6376679",
"0.6374092",
"0.6354298",
"0.63427454",
"0.6342673",
"0.6336528",
"0.6305888",
"0.6292886",
"0.6292886",
"0.6292886",
"0.6292815",
"0.6242221"
]
| 0.8127358 | 0 |
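
The num_to_pos table that __getpos__ relies on is not shown above; a minimal sketch of how such a lookup (together with the pos_to_num counterpart used by __getnum__) could be built from a 2-D board, assuming the board is given as a list of rows (build_lookup_tables is a hypothetical helper):

# Sketch: build number->position and position->number tables from a board.
def build_lookup_tables(board):
    num_to_pos = {}
    pos_to_num = {}
    for i, row in enumerate(board):
        for j, num in enumerate(row):
            num_to_pos[num] = (i, j)
            pos_to_num[(i, j)] = num
    return num_to_pos, pos_to_num

if __name__ == '__main__':
    num_to_pos, pos_to_num = build_lookup_tables([[1, 2], [3, 0]])
    print(num_to_pos[3])       # (1, 0)
    print(pos_to_num[(0, 1)])  # 2
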
Depth-first graph traversal that extracts clumps (nodes) and tunnels (edges). The graph must have at least one point that is a junction. | def find_structure(self):
cave_graphs = []
starting_point = None
        # first, initialize points
for point in self.points.values():
neighbors = self.get_neighbors(point)
if len(neighbors) != 2 and point.node is None:
starting_point = point
                print('Found graph!')
cave_graphs.append(self.follow_graph(starting_point))
if starting_point is None:
raise Exception("Couldn't detect any junction in the thinned map.")
return cave_graphs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def construct_junction_graph(self) -> Graph[Cell]:\n if self.maze is None:\n raise ValueError('No current maze to construct a junction graph from!')\n\n junction_graph = Graph()\n visited = defaultdict(bool)\n\n def cell_visitor(cell: Cell) -> None:\n visited[cell] = True\n for neighbor in self.maze.neighbors(cell):\n direction = Direction.between(cell, neighbor)\n if direction in cell.open_walls and not visited[neighbor]:\n while self.is_corridor_cell(neighbor):\n neighbor = self.maze.neighbor(neighbor, direction)\n junction_graph.add_vertex(cell)\n junction_graph.add_vertex(neighbor)\n junction_graph.add_edge(cell, neighbor)\n\n self.maze.depth_first_search(self.maze.start_cell, cell_visitor)\n return junction_graph",
"def extract_nodes(graph):\n return graph.get_nodes()",
"def get_components(graph):\n return [graph.subgraph(c).copy() for c in nx.connected_components(graph)]",
"def extractFeatures(graph):\n # create node feature matrix\n v = {}\n for node in graph.Nodes():\n v[node.GetId()] = []\n # compute features\n neighborhoodFeatures(graph, v)\n recursiveFeatures(graph, v)\n return v",
"def find_communities(graph):\n visited = set()\n communities = []\n for node in graph:\n if node not in visited:\n community = _find_community(node, graph, visited)\n communities.append(community)\n\n return communities",
"def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected",
"def run(self, infected_graph):\n pos = nx.spring_layout(infected_graph)\n points = np.zeros((len(pos), 2))\n i = 0\n for p in pos:\n points[i] = pos[p]\n i += 1\n \n hull = ConvexHull(points)\n nodes = list(pos)\n return [nodes[p] for p in hull.vertices]",
"def junctions(self):\n for node_name in self._junctions:\n yield node_name, self._data[node_name]",
"def find_galls(graph):\n # Steps 1, 2, 3\n reticulation_nodes = find_reticulation_nodes(graph)\n if reticulation_nodes is None:\n return\n\n cycles = {}\n # 3. For every reticulation node find its two parents. Each of these\n # parents belongs to a chain of the gall.\n for reticulation, parents in reticulation_nodes.items():\n\n # 4. For every parent find its parent and assign it to the same\n # chain. (At each step discover one node from each chain.)\n left_chain = {parents[0]}\n right_chain = {parents[1]}\n\n # 5. Continue this process until a node is found which already\n # belongs to the other chain. This is the beginning node of the\n # gall. If no such node is found, return null.\n current_left = parents[0]\n current_right = parents[1]\n while not left_chain.intersection(right_chain):\n ancestors = get_parents(graph, current_left)\n assert len(ancestors) == 1, \\\n \"Found another reticulation node inside left chain.\"\n current_left = ancestors[0]\n left_chain.add(ancestors[0])\n\n ancestors = get_parents(graph, current_right)\n assert len(ancestors) == 1, \\\n \"Found another reticulation node inside right chain.\"\n current_right = ancestors[0]\n right_chain.add(ancestors[0])\n\n cycles[reticulation] = (left_chain, right_chain)\n\n # 6. After locating all the galls, test the galled tree and the\n # galled network condition.\n # 7. If the galled tree condition holds then characterize the graph\n # as a \"galled tree\".\n # 8. Else if the galled network condition holds then characterize\n # the graph as a \"galled network\".\n # 9. Else return null.\n # 10. Return the located galls.\n if is_galled_tree(graph, cycles):\n return cycles, True\n\n elif is_galled_network(graph, cycles):\n return cycles, False\n\n else:\n return",
"def graph_decomposition(graph = None):\n\tg = graph.copy()\n\ttag_break_points(graph = g)\n\tstems_list = stems(graph = g)\n\tremove_stems(graph = g)\n\n\tcomponent_dict = loops(graph = g)\n\tcomponent_dict['stem'] = stems_list\n\n\thairpinloops = len(component_dict['hairpinloop'])\n\tbulges = len(component_dict['bulge'])\n\tinternal_loops = len(component_dict['internal_loop'])\n\tmultiloop3s = len(component_dict['multiloop3'])\n\tmultiloop4s = len(component_dict['multiloop4'])\n\tmultiloop5s = len(component_dict['multiloop5'])\n\tdangling_ends = len(component_dict['dangling_end'])\n\tstem = len(component_dict['stem'])\n\tseq_length = nx.number_of_nodes(graph)\n\n\tcomponent_counter = {'hairpinloop':hairpinloops , 'bulge':bulges , 'internal_loop':internal_loops , \\\n\t\t\t\t\t\t'multiloop3':multiloop3s , 'multiloop4':multiloop4s , 'multiloop5':multiloop5s, 'dangling_end':dangling_ends , \\\n\t\t\t\t\t\t'stem':stem, 'size':seq_length}\n\treturn component_dict, component_counter",
"def connected_components(self) -> List[list]:\n self.reset_tags()\n ans = []\n visited = dict() # A dictionary of visited nodes\n\n for key in self._graph.get_all_v():\n if not visited.get(key):\n path = self.connected_component(key)\n for node in path:\n visited.__setitem__(node.key, True)\n ans.append(path)\n return ans",
"def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com",
"def connecting_graph(self, solution_graph, weight='cost', ignore_sources=False):\n debut = time.time()\n self.logger.info('start connecting graph')\n\n # we need an undirected graph\n undirected_solution_graph = solution_graph.to_undirected()\n\n if self.old_network_graph is not None and self.modify_old_network:\n undirected_solution_graph = nx.compose(nx.MultiGraph(self.old_network_graph), undirected_solution_graph)\n\n # if already connected\n if nx.is_connected(undirected_solution_graph) == True:\n self.logger.info(\"the solution graph is already connected\")\n return []\n\n # Computing the minimum sources in each component and all junction nodes in the solution graph\n nodetype = nx.get_node_attributes(undirected_solution_graph, config.NODE_TYPE_KEY)\n list_sources = [node for node in nodetype if nodetype[node] == config.SUPPLY_NODE_TYPE]\n\n # 1. Search of all connected subgraphs\n if not ignore_sources:\n reduced_list_sources = []\n while len(list_sources) > 0:\n source, is_isolated = list_sources.pop(0), True\n for i in range(len(list_sources)):\n is_isolated = is_isolated and not (nx.has_path(undirected_solution_graph, source, list_sources[i]))\n if is_isolated:\n reduced_list_sources.append(source)\n else:\n reduced_list_sources = [list(n)[0] for n in nx.connected_components(undirected_solution_graph)]\n\n # 2. Creation of all convex hulls for each source in reduced_list_sources\n hulls = {}\n for source in reduced_list_sources:\n coord_compo = {}\n nodes_connecting_source = nx.node_connected_component(undirected_solution_graph, source)\n for node in nodes_connecting_source:\n xy = tuple(self.optimization_graph.get_node_coordinates(node))\n coord_compo[xy] = node\n if len(coord_compo) > 2:\n convexhull = ConvexHull(list(coord_compo.keys())).points\n else:\n convexhull = list(coord_compo.keys())\n hulls[source] = [coord_compo[tuple(coord)] for coord in convexhull]\n\n # 3. 
Create list of possible list_edges_to_add\n list_edges_to_add = {} # list of {(S1, S2):(length_of_SP, edges_to_add)}\n\n for S1, S2 in combinations(reduced_list_sources, 2):\n\n # change weight of edges\n for i in range(len(hulls[S1])-1):\n u,v = hulls[S1][i], hulls[S1][i+1]\n self.optimization_graph.add_edge(u,v,key=-1,weight=0)\n self.optimization_graph.add_edge(hulls[S1][-1],hulls[S1][0],key=-1,weight=0)\n for i in range(len(hulls[S2])-1):\n u,v = hulls[S2][i], hulls[S2][i+1]\n self.optimization_graph.add_edge(u,v,key=-1,weight=0)\n self.optimization_graph.add_edge(hulls[S2][-1],hulls[S2][0],key=-1,weight=0)\n\n # find the shortest path\n source, target = hulls[S1][0], hulls[S2][0] # it's a choice to take 0, but no matter\n try:\n length, path = nx.single_source_dijkstra(self.optimization_graph, source, target=target, weight=weight)\n except nx.NetworkXNoPath:\n self.logger.info(\"Source \" + str(S1) + \" and source \" + str(S2) + \" can't be connected\")\n return []\n list_weights = nx.get_edge_attributes(self.optimization_graph, weight)\n\n # edges to add to connect S1 and S2\n edges_to_add = []\n for i in range(len(path) - 1):\n u, v = path[i], path[i + 1]\n # if the edge between (u,v) is not artificial, we add it\n if list_weights.get((u, v, -1), None) != 0 and list_weights.get((u, v, 0), None) is not None:\n edges_to_add.append((u, v, 0))\n if list_weights.get((v, u, -1), None) != 0 and list_weights.get((v, u, 0), None) is not None:\n edges_to_add.append((v, u, 0))\n list_edges_to_add[(S1, S2)] = (length, edges_to_add)\n\n # change weight of edges\n for i in range(len(hulls[S1])-1):\n u,v = hulls[S1][i], hulls[S1][i+1]\n self.optimization_graph.remove_edge(u,v,key=-1)\n self.optimization_graph.remove_edge(hulls[S1][-1],hulls[S1][0],key=-1)\n for i in range(len(hulls[S2])-1):\n u,v = hulls[S2][i], hulls[S2][i+1]\n self.optimization_graph.remove_edge(u,v,key=-1)\n self.optimization_graph.remove_edge(hulls[S2][-1],hulls[S2][0],key=-1)\n\n\n # 4. choice of best edges to add (Kruskal)\n G = nx.Graph()\n for (S1, S2) in list_edges_to_add:\n (length, _) = list_edges_to_add[(S1, S2)]\n if not G.has_node(S1):\n G.add_node(S1)\n if not G.has_node(S2):\n G.add_node(S2)\n G.add_edge(S1, S2, weight=length)\n\n reduced_list_edges_to_add = set()\n T = nx.minimum_spanning_tree(G)\n for u, v in T.edges:\n if (u, v) in list_edges_to_add:\n reduced_list_edges_to_add = reduced_list_edges_to_add.union(set(list_edges_to_add[(u, v)][1]))\n if (v, u) in list_edges_to_add:\n reduced_list_edges_to_add = reduced_list_edges_to_add.union(set(list_edges_to_add[(v, u)][1]))\n\n self.logger.info('end connecting graph in ' + str(time.time() - debut) + ' s')\n return list(reduced_list_edges_to_add)",
"def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)",
"def _find_community(root, graph, visited):\n community = [root]\n visited.add(root)\n next_queue = [root]\n while next_queue:\n node = next_queue.pop(0)\n for child in graph[node]:\n if child not in visited:\n next_queue.append(child)\n community.append(child)\n visited.add(child)\n\n return community",
"def __dfs_roads_directions(self, with_prints=False) -> Set[JuncRoadSingleConnection]:\n\n roads: Set[JuncRoadSingleConnection] = set()\n visited_indices: Set[JuncIndices] = set()\n\n def dfs_rec(junc: JuncNode):\n \"\"\"\n recursively run from the input junction\n :param junc: the junction to run from\n \"\"\"\n # add to visited\n visited_indices.add(junc.indices)\n # run on neighbors\n for neighbor in self.get_connected_juncs(junc):\n # go over unvisited juncs and add roads to them from current\n if neighbor.indices not in visited_indices:\n if with_prints:\n print(junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n dfs_rec(neighbor)\n \"\"\"\n there is a case where we are currently at junc 1, which has neighbors 2,3.\n from junc 1 wwe move to 2, that moves to 3 from it.\n 3 will not go to 1 because 1 is visited, so the road 3->1 will not be created.\n when returning to 1, it will not go to 3, because 3 is visited, so the road 1->3 will not be created.\n so we result in a conncetion with no road, fix it:\n \"\"\"\n if neighbor.indices in visited_indices \\\n and JuncRoadSingleConnection(junc.indices, neighbor.indices) not in roads \\\n and JuncRoadSingleConnection(neighbor.indices, junc.indices) not in roads:\n if with_prints:\n print(\"special\", junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n # do not call dfs recursivly\n\n def first_node(junc: JuncNode):\n \"\"\"\n run specifically for a group-start junction\n :param junc: a junction that is the first in a connected group\n \"\"\"\n # add to visited\n visited_indices.add(junc.indices)\n #\n \"\"\"\n first, choose a random road to be in-road. this prevents a first node with only out-roads.\n a problem: if this is not the first first_node, it is possible that the random road will be \n an already existing road, in the otehr direction. 
so we need to make sure that the random road we choose\n is not already set in the other side.\n \"\"\"\n neighbors = self.get_connected_juncs(junc).copy()\n in_checked_indices: Set[JuncIndices] = set()\n in_road_junc = choice(neighbors)\n in_checked_indices.add(in_road_junc.indices)\n while JuncRoadSingleConnection(junc.indices, in_road_junc.indices) in roads and len(\n in_checked_indices) != len(neighbors):\n in_road_junc = choice(neighbors)\n in_checked_indices.add(in_road_junc.indices)\n if len(in_checked_indices) != len(neighbors):\n # regular stop, we have found a road to be in_road.\n directions = self.get_connection_directions(in_road_junc, junc)\n roads.add(JuncRoadSingleConnection(in_road_junc.indices, junc.indices, directions[0], directions[1]))\n if with_prints:\n print(\"first in-road\", in_road_junc.indices, junc.indices)\n # run for the rest of the neighbors\n neighbors.remove(in_road_junc)\n # else, this junc has only out-roads and this cannot be fixed.\n for neighbor in neighbors:\n if neighbor.indices not in visited_indices: # the other case is handled through the neighbor in dfs_rec\n if with_prints:\n print(\"first\", junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n dfs_rec(neighbor)\n\n all_juncs_indices: Set[JuncIndices] = {junc.indices for junc in self.get_all_juncs()}\n # the graph may not be connected, should run until all connected parts are visited\n while len(all_juncs_indices) != len(visited_indices):\n # now choose a junc and run on it.\n start_junc = self.get_junc(sample(all_juncs_indices.difference(visited_indices), 1)[0])\n first_node(start_junc)\n return roads",
"def junction_visitor(self, cell: Cell) -> None:\n for neighbor in self.junction_graph.neighbors(cell):\n direction = self.junction_direction(cell, neighbor)\n if direction in cell.open_walls and self.is_in_solution(neighbor):\n self.prev_cells[cell] = neighbor\n break",
"def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))",
"def gapJunctions(self, recurse = True):\n \n junctions = []\n junctions += self._gapJunctions\n if recurse:\n for subNeurite in self._neurites:\n junctions += subNeurite.gapJunctions()\n return junctions",
"def find_reticulation_nodes(graph):\n # 2. If a node with more than two incoming edges is found, then\n # return null.\n degrees = graph.in_degree()\n if any(deg > 2 for deg in degrees.values()):\n return\n\n # 1. Perform a simple graph traversal in order to locate the\n # reticulation nodes.\n # 3. For every reticulation node find its two parents. Each of these\n # parents belongs to a chain of the gall.\n return {node: get_parents(graph, node) for node in graph\n if degrees[node] == 2}",
"def find_selfloop_nodes(G):\n nodes_in_selfloops = []\n\n # Iterate over all the edges of G\n for u, v in G.edges:\n if u == v: # Check if node u and node v are the same\n nodes_in_selfloops.append(u) # Append node u to nodes_in_selfloops\n return nodes_in_selfloops",
"def path(g): #g: graph\n marked = set()\n nodes = set(g.nodes) \n output = list()\n def recursive(g):\n for i in nodes.copy():\n d = dependents(g,i)\n if (not d) or all(dd in marked for dd in d):\n output.append((i,g.nodes[i]['word']))\n marked.add(i)\n nodes.remove(i)\n if nodes==set([0]):\n break\n recursive(g)\n break\n recursive(g)\n return output",
"def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]",
"def dfs2(G):\r\n\r\n for v in V(G):\r\n v.visited = False\r\n\r\n result = []\r\n\r\n for v in V(G):\r\n if not v.visited:\r\n X = dfs2_visit(v)\r\n result.append(X)\r\n\r\n return result",
"def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs",
"def loops(graph = None):\n\tunknown_structs = []\n\tcompound_structs = []\n\tloops_dict = create_components_dict()\n\tfor subgraph in nx.connected_component_subgraphs(graph):\n\t\tif subgraph.number_of_nodes() < 3:\n\t\t\tunknown_structs.append(subgraph)\n\t\telse:\n\t\t\tif connectivity_threshold(graph = subgraph) > 2 or loop_type(graph= subgraph) == 'NA':\n\t\t\t\tcompound_structs.append(subgraph)\n\t\t\telse:\n\t\t\t\tloops_dict[loop_type(graph= subgraph)].append(subgraph)\n\treturn loops_dict",
"def get_end_nodes(graph):\n nodelist = [\n i\n for i in graph.nodes_iter()\n if len(graph.neighbors(i))==1\n ]\n return nodelist",
"def extract_edges(graph):\n return graph.get_edges()",
"def tour(self, graph):\n solution = self.initialize_solution(graph)\n unvisited = self.get_unvisited_nodes(graph, solution)\n while unvisited:\n node = self.choose_destination(graph, solution.current, unvisited)\n solution.add_node(node)\n unvisited.remove(node)\n solution.close()\n return solution",
"def get_conn_component(T, u):\n # Holds vertices, which have been already visited so the algorithm won't\n # process them again\n visited = []\n # Holds the frontier of the explored area. Vertices to be visited\n # and expanded are chosen from this list\n frontier = [u]\n\n # Following code is a basic DFS, which collects all vertices it explored\n # Invariant : Visited list keeps its property defined higher\n # Variant : Frontier list being reduced by 1 and then expanded by 0...deg(vertex)\n while len(frontier) != 0:\n vertex = frontier.pop()\n\n # Invariant : Frontier list keeps its property defined higher\n # Variant : child vertex of the one currently being expanded\n for child in T[vertex]:\n if not visited.__contains__(child) and not frontier.__contains__(child):\n frontier.append(child)\n visited.append(vertex)\n\n return visited"
]
| [
"0.62955743",
"0.6218852",
"0.60339916",
"0.5890118",
"0.5873582",
"0.5675615",
"0.5669391",
"0.5656545",
"0.56498134",
"0.5575273",
"0.55740166",
"0.55538756",
"0.5547625",
"0.5546689",
"0.55386174",
"0.5508685",
"0.5503386",
"0.5432993",
"0.5432737",
"0.54099274",
"0.5395029",
"0.5387578",
"0.5385528",
"0.5384914",
"0.53602535",
"0.5354546",
"0.5354406",
"0.5344241",
"0.53178746",
"0.53167564"
]
| 0.63038146 | 0 |
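
A minimal sketch of the junction-detection step that find_structure performs before following each graph, assuming the thinned map is given as a plain adjacency dict; follow_graph and the point/node bookkeeping of the original class are out of scope here (find_junctions is a hypothetical helper):

# Sketch: any point whose neighbour count is not exactly 2 is a junction.
def find_junctions(adjacency):
    junctions = [point for point, neighbors in adjacency.items()
                 if len(neighbors) != 2]
    if not junctions:
        raise Exception("Couldn't detect any junction in the thinned map.")
    return junctions

if __name__ == '__main__':
    # Y-shaped skeleton: 3 is a branching junction, 1/5/6 are dead ends.
    adjacency = {1: [2], 2: [1, 3], 3: [2, 4, 6], 4: [3, 5], 5: [4], 6: [3]}
    print(find_junctions(adjacency))  # [1, 3, 5, 6]
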
This test changes the NetBIOS name of the server and then verifies that this results in the server's domain SID changing. The new SID is stored in a global variable so that we can perform additional tests to verify that SIDs are rewritten properly in group_mapping.tdb. old_netbiosname is stored so that we can reset the configuration to what it was prior to the test. A failure here shows that we failed to write our new SID to the configuration database. | def test_056_netbios_name_change_check_sid(request):
depends(request, ["service_cifs_running"], scope="session")
global new_sid
global old_netbiosname
results = GET("/smb/")
assert results.status_code == 200, results.text
old_netbiosname = results.json()["netbiosname"]
old_sid = results.json()["cifs_SID"]
payload = {
"netbiosname": "nb_new",
}
results = PUT("/smb/", payload)
assert results.status_code == 200, results.text
new_sid_resp = results.json()["cifs_SID"]
assert old_sid != new_sid_resp, results.text
sleep(5)
results = GET("/smb/")
assert results.status_code == 200, results.text
new_sid = results.json()["cifs_SID"]
assert new_sid != old_sid, results.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_058_change_netbios_name_and_check_groupmap(request):\n depends(request, [\"SID_CHANGED\", \"ssh_password\"], scope=\"session\")\n payload = {\n \"netbiosname\": old_netbiosname,\n }\n results = PUT(\"/smb/\", payload)\n assert results.status_code == 200, results.text\n sleep(5)\n\n cmd = \"midclt call smb.groupmap_list\"\n results = SSH_TEST(cmd, user, password, ip)\n assert results['result'] is True, results['output']\n groupmaps = json.loads(results['output'].strip())\n\n test_entry = None\n for entry in groupmaps['local'].values():\n if entry['nt_name'] == 'testsidgroup':\n test_entry = entry\n break\n\n assert test_entry is not None, groupmaps['local'].values()\n domain_sid = test_entry['sid'].rsplit(\"-\", 1)[0]\n assert domain_sid != new_sid, groupmaps['local'].values()",
"def test_nickChange(self):\n oldnick = \"foo\"\n newnick = \"bar\"\n self.protocol.register(oldnick)\n self.protocol.irc_RPL_WELCOME(\"prefix\", [\"param\"])\n self.protocol.setNick(newnick)\n self.assertEqual(self.protocol.nickname, oldnick)\n self.protocol.irc_NICK(\"{}!quux@qux\".format(oldnick), [newnick])\n self.assertEqual(self.protocol.nickname, newnick)",
"def test_update_server_name(self):\n fake_update_server = fakes.make_fake_server(\n self.server_id, self.updated_server_name\n )\n\n self.register_uris(\n [\n self.get_nova_discovery_mock_dict(),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute',\n 'public',\n append=['servers', self.server_name],\n ),\n status_code=404,\n ),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute',\n 'public',\n append=['servers', 'detail'],\n qs_elements=['name=%s' % self.server_name],\n ),\n json={'servers': [self.fake_server]},\n ),\n dict(\n method='PUT',\n uri=self.get_mock_url(\n 'compute', 'public', append=['servers', self.server_id]\n ),\n json={'server': fake_update_server},\n validate=dict(\n json={'server': {'name': self.updated_server_name}}\n ),\n ),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'network', 'public', append=['v2.0', 'networks']\n ),\n json={'networks': []},\n ),\n ]\n )\n self.assertEqual(\n self.updated_server_name,\n self.cloud.update_server(\n self.server_name, name=self.updated_server_name\n )['name'],\n )\n\n self.assert_calls()",
"def updatesid(dn, sid, l):\n mod_attrs = [(ldap.MOD_REPLACE, 'sambaSID', sid )]\n l.modify_s(dn, mod_attrs)",
"def host_renameOpsiDepotserver(self, oldId, newId):",
"def test_057_create_new_smb_group_for_sid_test(request):\n depends(request, [\"SID_CHANGED\", \"ssh_password\"], scope=\"session\")\n global group_id\n payload = {\n \"name\": \"testsidgroup\",\n \"smb\": True,\n }\n results = POST(\"/group/\", payload)\n assert results.status_code == 200, results.text\n group_id = results.json()\n sleep(5)\n\n cmd = \"midclt call smb.groupmap_list\"\n results = SSH_TEST(cmd, user, password, ip)\n assert results['result'] is True, results['output']\n groupmaps = json.loads(results['output'].strip())\n\n test_entry = None\n for entry in groupmaps['local'].values():\n if entry['nt_name'] == 'testsidgroup':\n test_entry = entry\n break\n\n assert test_entry is not None, groupmaps['local'].values()\n domain_sid = test_entry['sid'].rsplit(\"-\", 1)[0]\n assert domain_sid == new_sid, groupmaps['local'].values()",
"def change_nick(self, before, after):\n userdata = self.users[irc.strings.lower(before)]\n self.del_user(before)\n self.add_user(after, userdata)",
"def change_server_ident(name, version=None):\n global server_ident\n \n server_ident[\"server_name\"] = name\n \n if version != None and len(version) > 0:\n server_ident[\"server_version\"] = str(version)\n version_text = \"/%s\" % server_ident[\"server_version\"]\n else:\n version_text = \"\"\n \n server.version = server_ident[\"server_name\"] + version_text",
"def test_sg_update_name(self):\n\n # Add a faked storage_group\n faked_storage_group = self.add_storage_group1()\n storage_group_name = faked_storage_group.name\n\n storage_group_mgr = self.console.storage_groups\n storage_group = storage_group_mgr.find(name=storage_group_name)\n\n new_storage_group_name = \"new-\" + storage_group_name\n\n # Execute the code to be tested\n storage_group.update_properties(\n properties={'name': new_storage_group_name})\n\n # Verify that the resource is no longer found by its old name, using\n # list() (this does not use the name-to-URI cache).\n storage_groups_list = storage_group_mgr.list(\n filter_args=dict(name=storage_group_name))\n assert len(storage_groups_list) == 0\n\n # Verify that the resource is no longer found by its old name, using\n # find() (this uses the name-to-URI cache).\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=storage_group_name)\n\n # Verify that the resource object already reflects the update, even\n # though it has not been refreshed yet.\n assert storage_group.properties['name'] == new_storage_group_name\n\n # Refresh the resource object and verify that it still reflects the\n # update.\n storage_group.pull_full_properties()\n assert storage_group.properties['name'] == new_storage_group_name\n\n # Verify that the resource can be found by its new name, using find()\n new_storage_group_find = storage_group_mgr.find(\n name=new_storage_group_name)\n assert new_storage_group_find.properties['name'] == \\\n new_storage_group_name\n\n # Verify that the resource can be found by its new name, using list()\n new_storage_groups_list = storage_group_mgr.list(\n filter_args=dict(name=new_storage_group_name))\n assert len(new_storage_groups_list) == 1\n new_storage_group_list = new_storage_groups_list[0]\n assert new_storage_group_list.properties['name'] == \\\n new_storage_group_name",
"def on_the_network_global_configuration_page_change_the_first_nameserver_to_nameserver1(driver, nameserver1):\n global nameserver_1\n nameserver_1 = nameserver1\n assert wait_on_element(driver, 7, '//h4[contains(.,\"Hostname and Domain\")]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Nameserver 1\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Nameserver 1\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Nameserver 1\"]').send_keys(nameserver1)",
"def test_overrideAlterCollidedNick(self):\n nick = \"foo\"\n self.protocol.alterCollidedNick = lambda nick: nick + \"***\"\n self.protocol.register(nick)\n self.protocol.irc_ERR_NICKNAMEINUSE(\"prefix\", [\"param\"])\n lastLine = self.getLastLine(self.transport)\n self.assertEqual(lastLine, \"NICK {}\".format(nick + \"***\"))",
"def test_servername(self):\n args = []\n\n def servername(conn):\n args.append((conn, conn.get_servername()))\n\n context = Context(SSLv23_METHOD)\n context.set_tlsext_servername_callback(servername)\n\n # Necessary to actually accept the connection\n context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))\n context.use_certificate(\n load_certificate(FILETYPE_PEM, server_cert_pem)\n )\n\n # Do a little connection to trigger the logic\n server = Connection(context, None)\n server.set_accept_state()\n\n client = Connection(Context(SSLv23_METHOD), None)\n client.set_connect_state()\n client.set_tlsext_host_name(b\"foo1.example.com\")\n\n interact_in_memory(server, client)\n\n assert args == [(server, b\"foo1.example.com\")]",
"def _update_domains_on_server_update(self, server):\n ns_rec_content = self._sanitize_content(\"NS\", server['name'])\n\n LOG.debug(\"Content field of existing NS records will be updated\"\n \" to the following upon server update: %s\" % ns_rec_content)\n try:\n\n # Execute the manually prepared query\n # A TX is required for, at the least, SQLite.\n #\n self.session.begin()\n\n # first determine the old name of the server\n # before making the updates. Since the value\n # is coming from an NS record, the server name\n # will not have a trailing period (.)\n old_ns_rec = self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .first()\n if old_ns_rec is not None:\n old_server_name = old_ns_rec.content\n\n LOG.debug(\"old server name read from a backend NS record:\"\n \" %s\" % old_server_name)\n LOG.debug(\"new server name: %s\" % server['name'])\n\n # Then update all NS records that need updating\n # Only the name of a server has changed when we are here\n self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .update({\"content\": ns_rec_content})\n\n # Then update all SOA records as necessary\n # Do the SOA last, ensuring we don't trigger a NOTIFY\n # before the NS records are in place.\n #\n # Update the content field of every SOA record that has the\n # old server name as part of its 'content' field to reflect\n # the new server name.\n # Need to strip the trailing period from the server['name']\n # before using it to replace the old_server_name in the SOA\n # record since the SOA record already has a trailing period\n # and we want to keep it\n self.session.execute(models.Record.__table__\n .update()\n .where(and_(models.Record.__table__.c.type == \"SOA\",\n models.Record.__table__.c.content.like\n (\"%s%%\" % old_server_name)))\n .values(content=func.replace(\n models.Record.__table__.c.content,\n old_server_name,\n server['name'].rstrip('.'))\n )\n )\n\n except Exception:\n with excutils.save_and_reraise_exception():\n self.session.rollback()\n # now commit\n else:\n self.session.commit()",
"def test_change_server(self):\n networktables_mock = unittest.mock.Mock()\n\n network_instance = network.Network(networktables_mock, None, None)\n network_instance.change_server(\"localhost\")\n\n # Make sure Networktables was shutdown before network change\n self.assertTrue(networktables_mock.shutdown.called)\n # Make sure new network server ip is correct\n networktables_mock.initialize.assert_called_with(server=\"localhost\")",
"def _set_nameserver(self, instance):\n ctxt = context.get_admin_context()\n ip = db.instance_get_fixed_address(ctxt, instance['id'])\n network = db.fixed_ip_get_network(ctxt, ip)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--nameserver', network['dns'])\n if err:\n LOG.error(err)\n except Exception as err:\n LOG.error(err)\n raise exception.Error('Unable to set nameserver for %s' %\n instance['id'])",
"def test_rename_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n old_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n new_name = self.generate_name(object_type='ss')\n self.replication.rename_snapshot_by_snap_id(\n sg_name, old_name, new_name, snap_id)\n snap_list = self.replication.get_storage_group_snapshot_list(\n sg_name)\n self.assertEqual(new_name, snap_list[0])\n # change name back so clean up will work automatically\n self.replication.rename_snapshot_by_snap_id(\n sg_name, new_name, old_name, snap_id)",
"def set_sys_name(self, sNewSysName):\n\t\tcall_sdk_function('PrlVmDev_SetSysName', self.handle, sNewSysName)",
"def test_edit_rename(self):\n group = groups.get_by_name(\"First Group\")\n new_name = 'BRAND-NEW-NAME'\n self.open_url('/group/edit/{0}'.format(group.id))\n el = self.wd.find_element(By.ID, \"name\")\n el.clear()\n el.send_keys(new_name)\n self.submit_form(\"group_form\")\n self.assertEquals('Group List', self.wd.title)\n self.assert_in_list_table(new_name)",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def userRenamed(self, old, new):\n sessions = self.findSessions(old)\n for ss in sessions:\n old = old.decode(ss.encoding)\n new = new.decode(ss.encoding)\n self.sendResponse(ss.rename(old, new))",
"def change_surname(change_account):\n change_data(change_account, changed_data='surname')",
"def test_erroneousNick(self):\n # Registration case: change illegal nickname to erroneousNickFallback\n badnick = \"foo\"\n self.assertEqual(self.protocol._registered, False)\n self.protocol.register(badnick)\n self.protocol.irc_ERR_ERRONEUSNICKNAME(\"prefix\", [\"param\"])\n lastLine = self.getLastLine(self.transport)\n self.assertEqual(\n lastLine, \"NICK {}\".format(self.protocol.erroneousNickFallback)\n )\n self.protocol.irc_RPL_WELCOME(\"prefix\", [\"param\"])\n self.assertEqual(self.protocol._registered, True)\n self.protocol.setNick(self.protocol.erroneousNickFallback)\n self.assertEqual(self.protocol.nickname, self.protocol.erroneousNickFallback)\n\n # Illegal nick change attempt after registration. Fall back to the old\n # nickname instead of erroneousNickFallback.\n oldnick = self.protocol.nickname\n self.protocol.setNick(badnick)\n self.protocol.irc_ERR_ERRONEUSNICKNAME(\"prefix\", [\"param\"])\n lastLine = self.getLastLine(self.transport)\n self.assertEqual(lastLine, \"NICK {}\".format(badnick))\n self.assertEqual(self.protocol.nickname, oldnick)",
"def _switch_nick(self):\n self.nickname = self.firstnick + str(random.randint(1000, 9999))\n self._log(self.botlog, 'Switching to nick %s' % self.nickname)\n self._send('NICK %s' % self.nickname)",
"def set_domain_sid(self, sid):\n dsdb._samdb_set_domain_sid(self, sid)",
"def test_00_rename_machine(self):\n client = self.client\n\n # Make sure the environment is as expected.\n j = check_json(client, 'api/db_default/v4/nts/machines/1')\n self.assertEqual(j['machine']['name'], 'localhost__clang_DEV__x86_64')\n\n data = {\n 'action': 'rename',\n 'name': 'new_machine_name',\n }\n resp = client.post('api/db_default/v4/nts/machines/1', data=data)\n self.assertEqual(resp.status_code, 401)\n\n resp = client.post('api/db_default/v4/nts/machines/1', data=data,\n headers={'AuthToken': 'wrong token'})\n self.assertEqual(resp.status_code, 401)\n\n resp = client.post('api/db_default/v4/nts/machines/1', data=data,\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 200)\n\n # Machine should be renamed now.\n j = check_json(client, 'api/db_default/v4/nts/machines/1')\n self.assertEqual(j['machine']['name'], 'new_machine_name')",
"def player_changename(event_var):\r\n debug.write(\"[SourceRPG] Handling player_changename\", 1)\r\n players[event_var['userid']]['name'] = event_var['newname']\r\n debug.write(\"[SourceRPG] player_changename handled\", 1)",
"def setNetGroup(addr): #status: Done, not tested\r\n pass",
"def test_rebuilt_server_hostname(self):\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n hostname = remote_client.get_hostname()\n self.assertEqual(hostname, self.expected_name)",
"def change_client_name(self, name, client):\n if self.name_is_unique(name):\n client.set_name(name)\n self.send_message('Usuario actualizado exitosamente.', client.get_socket())\n else:\n self.send_message('Nombre repetido.', client.get_socket())",
"def test_rename_org(client: Client) -> None:\n with dev_login(client, 'admin'):\n # Create an org\n resp = client.post('api/v1/org', json={\n 'name': 'testorg12'\n })\n org_id = resp.json['id']\n assert 200 <= resp.status_code <= 300\n\n # Create the second test org\n resp = client.post('api/v1/org', json={\n 'name': 'renameorgtest'\n })\n org_rename_test_id = resp.json['id']\n org_rename_test_name = resp.json['name']\n assert 200 <= resp.status_code <= 300\n\n new_name = 'testorgkevinwashere'\n\n # Test that user can successful rename the org\n client.put(f'/api/v1/org/{org_id}/rename/{new_name}')\n assert 200 <= resp.status_code <= 300\n\n # Get the org name of org_id and check if it has changed\n resp = client.get(f'/api/v1/org/{org_id}')\n assert 200 <= resp.status_code <= 300\n assert resp.json['name'] == new_name\n\n # Check that we cannot rename the org to an org that already exists\n resp = client.put(f'/api/v1/org/{org_rename_test_id}/rename/{org_rename_test_name}')\n assert resp.status_code == 403\n\n # Test that renaming an org that doesn't exist won't work\n resp = client.put(f'/api/v1/org/THISORGDOESN\\'TEXIST/rename/{org_rename_test_id}')\n assert resp.status_code == 404"
]
| [
"0.7362551",
"0.61641616",
"0.6045183",
"0.60308814",
"0.5808337",
"0.58069056",
"0.5755924",
"0.5711386",
"0.56862813",
"0.5601481",
"0.55547637",
"0.55288833",
"0.5508139",
"0.5473006",
"0.5441461",
"0.5417473",
"0.54068434",
"0.5388924",
"0.5378174",
"0.5349858",
"0.53214437",
"0.52781516",
"0.5267417",
"0.5261626",
"0.52369416",
"0.5224986",
"0.52202857",
"0.52180845",
"0.52097815",
"0.5149243"
]
| 0.7686053 | 0 |
Create testgroup and verify that a groupmap entry is generated with the new SID. | def test_057_create_new_smb_group_for_sid_test(request):
depends(request, ["SID_CHANGED", "ssh_password"], scope="session")
global group_id
payload = {
"name": "testsidgroup",
"smb": True,
}
results = POST("/group/", payload)
assert results.status_code == 200, results.text
group_id = results.json()
sleep(5)
cmd = "midclt call smb.groupmap_list"
results = SSH_TEST(cmd, user, password, ip)
assert results['result'] is True, results['output']
groupmaps = json.loads(results['output'].strip())
test_entry = None
for entry in groupmaps['local'].values():
if entry['nt_name'] == 'testsidgroup':
test_entry = entry
break
assert test_entry is not None, groupmaps['local'].values()
domain_sid = test_entry['sid'].rsplit("-", 1)[0]
assert domain_sid == new_sid, groupmaps['local'].values() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_group(self):\n pass",
"def test_create_group(self):\n pass",
"def test_verify_that_you_can_create_a_new_group():",
"def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200",
"def test_create_resource_group(self):\n pass",
"def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')",
"def test_add_group(self):\n pass",
"def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups",
"def test_add(self):\n # add a new group\n self.assertTrue(self.run_function(\"group.add\", [self._group], gid=self._gid))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"gid\"], self._gid)\n self.assertEqual(group_info[\"name\"], self._group)\n # try adding the group again\n self.assertFalse(self.run_function(\"group.add\", [self._group], gid=self._gid))",
"def test_add_system_group_gid(self):\n\n gid = self.__get_free_system_gid()\n\n # add a new system group\n self.assertTrue(self.run_function(\"group.add\", [self._group, gid, True]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"name\"], self._group)\n self.assertEqual(group_info[\"gid\"], gid)\n # try adding the group again\n self.assertFalse(self.run_function(\"group.add\", [self._group, gid]))",
"def test_add_system_group(self):\n\n gid_min, gid_max = self.__get_system_group_gid_range()\n\n # add a new system group\n self.assertTrue(self.run_function(\"group.add\", [self._group, None, True]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"name\"], self._group)\n self.assertTrue(gid_min <= group_info[\"gid\"] <= gid_max)\n # try adding the group again\n self.assertFalse(self.run_function(\"group.add\", [self._group]))",
"def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2",
"def test_groups_group_id_state_put(self):\n pass",
"def test_create_device_group(self):\n pass",
"def test_user_group_controller_create(self):\n pass",
"def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass",
"def test_system_group_create(audreyvars, tunnel_requested, system_groups):\n server = audreyvars[\"KATELLO_HOST\"]\n login = audreyvars.get(\"KATELLO_USER\", \"admin\")\n org = audreyvars.get(\"KATELLO_ORG\", \"redhat\")\n password = audreyvars.get(\"KATELLO_PASS\", \"admin\")\n\n # If using a tunnel to access ec2, an alternative port is needed\n if tunnel_requested:\n port = audreyvars.get(\"SSH_TUNNEL_KATELLO_PORT\", 1443)\n else:\n port = audreyvars.get(\"KATELLO_PORT\", 443)\n\n # Query existing system groups\n current_group_names = [g.get('name') for g in common.katello.system_group_query(server, port, org, login, password)]\n\n # Determine whether groups were created\n new_group_ids = []\n for group_name in system_groups:\n if group_name not in current_group_names:\n result_dict = common.katello.system_group_create(server, port, org, login, password, group_name)\n new_group_ids.append(result_dict.get('id'))\n\n if len(new_group_ids) == 0:\n pytest.skip(msg=\"System groups already exist, no groups created\")",
"def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409",
"def test_api_v1_groups_id_put(self):\n pass",
"def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)",
"def test_058_change_netbios_name_and_check_groupmap(request):\n depends(request, [\"SID_CHANGED\", \"ssh_password\"], scope=\"session\")\n payload = {\n \"netbiosname\": old_netbiosname,\n }\n results = PUT(\"/smb/\", payload)\n assert results.status_code == 200, results.text\n sleep(5)\n\n cmd = \"midclt call smb.groupmap_list\"\n results = SSH_TEST(cmd, user, password, ip)\n assert results['result'] is True, results['output']\n groupmaps = json.loads(results['output'].strip())\n\n test_entry = None\n for entry in groupmaps['local'].values():\n if entry['nt_name'] == 'testsidgroup':\n test_entry = entry\n break\n\n assert test_entry is not None, groupmaps['local'].values()\n domain_sid = test_entry['sid'].rsplit(\"-\", 1)[0]\n assert domain_sid != new_sid, groupmaps['local'].values()",
"def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1",
"def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]",
"def test_chgid(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.assertTrue(self.run_function(\"group.chgid\", [self._group, self._new_gid]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"gid\"], self._new_gid)",
"def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)",
"def test_add_duplicate(self):\n self.open_url('/group/add')\n el = self.wd.find_element(By.ID, \"name\")\n el.send_keys(\"First Group\")\n self.submit_form(\"group_form\")\n self.assert_form_error(\"Group \\\"First Group\\\" already exists.\")",
"def test_save(self, name='test'):\n group = Group(name=name)\n group.save()\n return group",
"def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3",
"def _get_new_group_id():\n new_group = data_types.TestcaseGroup()\n new_group.put()\n return new_group.key.id()",
"def test_createGroup(self):\n tabGroup = widgets.TabGroup(u'group1', u'Group', tabs=[\n widgets.Tab(u'id4', u'Title 4', self.contentFactory)])\n tabs = self.tabs + [\n tabGroup,\n widgets.Tab(u'id5', u'Title 5', self.contentFactory)]\n tabView = widgets.TabView(tabs)\n self.assertEquals(\n tabView.getTabIDs(),\n [u'id1', u'id2', u'id3', u'id4', u'id5'])\n self.assertEquals(\n tabView._tabGroups,\n {u'group1': tabGroup})"
]
| [
"0.7478963",
"0.7478963",
"0.7324722",
"0.7320051",
"0.698248",
"0.6931495",
"0.68939006",
"0.6839453",
"0.68214154",
"0.6734239",
"0.6696365",
"0.6614686",
"0.65505034",
"0.6506986",
"0.6498141",
"0.6479654",
"0.64630705",
"0.6456968",
"0.6449117",
"0.6447869",
"0.6438386",
"0.64319223",
"0.6402241",
"0.6370937",
"0.63238573",
"0.6316505",
"0.6295232",
"0.62724817",
"0.6259744",
"0.6221109"
]
| 0.81538206 | 0 |
Verify that changes to the netbios name result in groupmap SID changes. | def test_058_change_netbios_name_and_check_groupmap(request):
depends(request, ["SID_CHANGED", "ssh_password"], scope="session")
payload = {
"netbiosname": old_netbiosname,
}
results = PUT("/smb/", payload)
assert results.status_code == 200, results.text
sleep(5)
cmd = "midclt call smb.groupmap_list"
results = SSH_TEST(cmd, user, password, ip)
assert results['result'] is True, results['output']
groupmaps = json.loads(results['output'].strip())
test_entry = None
for entry in groupmaps['local'].values():
if entry['nt_name'] == 'testsidgroup':
test_entry = entry
break
assert test_entry is not None, groupmaps['local'].values()
domain_sid = test_entry['sid'].rsplit("-", 1)[0]
assert domain_sid != new_sid, groupmaps['local'].values() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_056_netbios_name_change_check_sid(request):\n depends(request, [\"service_cifs_running\"], scope=\"session\")\n global new_sid\n global old_netbiosname\n\n results = GET(\"/smb/\")\n assert results.status_code == 200, results.text\n old_netbiosname = results.json()[\"netbiosname\"]\n old_sid = results.json()[\"cifs_SID\"]\n\n payload = {\n \"netbiosname\": \"nb_new\",\n }\n results = PUT(\"/smb/\", payload)\n assert results.status_code == 200, results.text\n new_sid_resp = results.json()[\"cifs_SID\"]\n assert old_sid != new_sid_resp, results.text\n sleep(5)\n\n results = GET(\"/smb/\")\n assert results.status_code == 200, results.text\n new_sid = results.json()[\"cifs_SID\"]\n assert new_sid != old_sid, results.text",
"def test_057_create_new_smb_group_for_sid_test(request):\n depends(request, [\"SID_CHANGED\", \"ssh_password\"], scope=\"session\")\n global group_id\n payload = {\n \"name\": \"testsidgroup\",\n \"smb\": True,\n }\n results = POST(\"/group/\", payload)\n assert results.status_code == 200, results.text\n group_id = results.json()\n sleep(5)\n\n cmd = \"midclt call smb.groupmap_list\"\n results = SSH_TEST(cmd, user, password, ip)\n assert results['result'] is True, results['output']\n groupmaps = json.loads(results['output'].strip())\n\n test_entry = None\n for entry in groupmaps['local'].values():\n if entry['nt_name'] == 'testsidgroup':\n test_entry = entry\n break\n\n assert test_entry is not None, groupmaps['local'].values()\n domain_sid = test_entry['sid'].rsplit(\"-\", 1)[0]\n assert domain_sid == new_sid, groupmaps['local'].values()",
"def test_sg_update_name(self):\n\n # Add a faked storage_group\n faked_storage_group = self.add_storage_group1()\n storage_group_name = faked_storage_group.name\n\n storage_group_mgr = self.console.storage_groups\n storage_group = storage_group_mgr.find(name=storage_group_name)\n\n new_storage_group_name = \"new-\" + storage_group_name\n\n # Execute the code to be tested\n storage_group.update_properties(\n properties={'name': new_storage_group_name})\n\n # Verify that the resource is no longer found by its old name, using\n # list() (this does not use the name-to-URI cache).\n storage_groups_list = storage_group_mgr.list(\n filter_args=dict(name=storage_group_name))\n assert len(storage_groups_list) == 0\n\n # Verify that the resource is no longer found by its old name, using\n # find() (this uses the name-to-URI cache).\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=storage_group_name)\n\n # Verify that the resource object already reflects the update, even\n # though it has not been refreshed yet.\n assert storage_group.properties['name'] == new_storage_group_name\n\n # Refresh the resource object and verify that it still reflects the\n # update.\n storage_group.pull_full_properties()\n assert storage_group.properties['name'] == new_storage_group_name\n\n # Verify that the resource can be found by its new name, using find()\n new_storage_group_find = storage_group_mgr.find(\n name=new_storage_group_name)\n assert new_storage_group_find.properties['name'] == \\\n new_storage_group_name\n\n # Verify that the resource can be found by its new name, using list()\n new_storage_groups_list = storage_group_mgr.list(\n filter_args=dict(name=new_storage_group_name))\n assert len(new_storage_groups_list) == 1\n new_storage_group_list = new_storage_groups_list[0]\n assert new_storage_group_list.properties['name'] == \\\n new_storage_group_name",
"def test_edit_rename(self):\n group = groups.get_by_name(\"First Group\")\n new_name = 'BRAND-NEW-NAME'\n self.open_url('/group/edit/{0}'.format(group.id))\n el = self.wd.find_element(By.ID, \"name\")\n el.clear()\n el.send_keys(new_name)\n self.submit_form(\"group_form\")\n self.assertEquals('Group List', self.wd.title)\n self.assert_in_list_table(new_name)",
"def test_rename_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n old_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n new_name = self.generate_name(object_type='ss')\n self.replication.rename_snapshot_by_snap_id(\n sg_name, old_name, new_name, snap_id)\n snap_list = self.replication.get_storage_group_snapshot_list(\n sg_name)\n self.assertEqual(new_name, snap_list[0])\n # change name back so clean up will work automatically\n self.replication.rename_snapshot_by_snap_id(\n sg_name, new_name, old_name, snap_id)",
"def test_nickChange(self):\n oldnick = \"foo\"\n newnick = \"bar\"\n self.protocol.register(oldnick)\n self.protocol.irc_RPL_WELCOME(\"prefix\", [\"param\"])\n self.protocol.setNick(newnick)\n self.assertEqual(self.protocol.nickname, oldnick)\n self.protocol.irc_NICK(\"{}!quux@qux\".format(oldnick), [newnick])\n self.assertEqual(self.protocol.nickname, newnick)",
"def get_name_of_same_named_teams(sname, sid):\n\n if sname == 'Zamora':\n if sid == 9404:\n return 'Zamora FC' # Venezuela\n elif sid == 26394:\n return 'Zamora CF' # Spain\n else:\n logger.error('Sportmonk ids for same named teams \"%s\" have changed', sname)\n return sname\n elif sname == 'Atromitos':\n if sid == 9849:\n return 'Atromitos A.' # Greece\n elif sid == 2953:\n return 'Atromitos Yeroskipou' # Cyprus\n else:\n logger.error('Sportmonk ids for same named teams \"%s\" have changed', sname)\n return sname\n elif sname == 'Atromitos/H.': # sid 9842\n return None\n elif sname == 'Libertas':\n if sid == 11019:\n return 'Libertas Novska' # Croatia\n elif sid == 19357:\n return 'A.C Libertas' # San Marino\n else:\n logger.error('Sportmonk ids for same named teams \"%s\" have changed', sname)\n return sname\n elif sname == 'Irtysh':\n if sid == 4000:\n return 'Irtysh Omsk' # Russia\n elif sid == 11058:\n return 'Irtysh Pavlodar' # Kazakstan\n else:\n logger.error('Sportmonk ids for same named teams \"%s\" have changed', sname)\n return sname\n elif sname == 'Linense':\n if sid == 7812:\n return 'Atletico Linense' # Brazil\n elif sid == 26006:\n return 'Real Balompedica Linense' # SPain\n else:\n logger.error('Sportmonk ids for same named teams \"%s\" have changed', sname)\n return sname\n elif sname == 'Sorrento':\n if sid == 10773:\n return 'Sorrento FC' # Australia\n elif sid == 24305:\n return 'F.C. Sorrento' # Italy\n else:\n logger.error('Sportmonk ids for same named teams \"%s\" have changed', sname)\n return sname\n else:\n return sname",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_groups_group_id_state_put(self):\n pass",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def test_inconsistent_name(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'givenName': ['Maarten'],\n 'sn': ['Visscher'],\n 'cn': ['Wessel']}\n }\n with self.assertRaises(CloneError):\n clone(entries)",
"def test_ipam_vlan_groups_update(self):\n pass",
"def test_rename_snapshot(self):\n if self.is_v4:\n self.skipTest('Rename shapshot by generation does '\n 'not work on the V4.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n old_name = snapshot_info.get('name')\n self.replication.rename_snapshot(\n sg_id=sg_name, snap_name=old_name, new_name='newname', gen_num=0)\n snap_list = self.replication.get_storage_group_snapshot_list(\n storage_group_id=sg_name)\n self.assertEqual('newname', snap_list[0])\n # change name back so clean up will work automatically\n self.replication.rename_snapshot(\n sg_id=sg_name, snap_name='newname', new_name=old_name,\n gen_num=0)",
"def _does_name_change_require_verification(user, old_name, new_name):\n return (\n is_verified_name_enabled()\n and old_name != new_name\n and len(get_certificates_for_user(user.username)) > 0\n )",
"def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)",
"def test_update_server_name(self):\n fake_update_server = fakes.make_fake_server(\n self.server_id, self.updated_server_name\n )\n\n self.register_uris(\n [\n self.get_nova_discovery_mock_dict(),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute',\n 'public',\n append=['servers', self.server_name],\n ),\n status_code=404,\n ),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute',\n 'public',\n append=['servers', 'detail'],\n qs_elements=['name=%s' % self.server_name],\n ),\n json={'servers': [self.fake_server]},\n ),\n dict(\n method='PUT',\n uri=self.get_mock_url(\n 'compute', 'public', append=['servers', self.server_id]\n ),\n json={'server': fake_update_server},\n validate=dict(\n json={'server': {'name': self.updated_server_name}}\n ),\n ),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'network', 'public', append=['v2.0', 'networks']\n ),\n json={'networks': []},\n ),\n ]\n )\n self.assertEqual(\n self.updated_server_name,\n self.cloud.update_server(\n self.server_name, name=self.updated_server_name\n )['name'],\n )\n\n self.assert_calls()",
"def test_device_states_device_name_put(self):\n pass",
"def test_chgid_gid_same():\n mock_group = {\"passwd\": \"*\", \"gid\": 0, \"name\": \"test\", \"members\": [\"root\"]}\n mock_pre_gid = MagicMock(return_value=0)\n with patch.dict(mac_group.__salt__, {\"file.group_to_gid\": mock_pre_gid}), patch(\n \"salt.modules.mac_group.info\", MagicMock(return_value=mock_group)\n ):\n assert mac_group.chgid(\"test\", 0)",
"def test_persistent_group_and_role_change(dev):\n form(dev[0], dev[1])\n\n logger.info(\"Start and stop autonomous GO on previous P2P client device\")\n dev[1].p2p_start_go()\n dev[1].remove_group()\n dev[1].dump_monitor()\n\n logger.info(\"Re-invoke the persistent group\")\n invite_from_go(dev[0], dev[1])",
"def test_edit_duplicate(self):\n group = groups.get_by_name(\"First Group\")\n new_name = '6th group'\n self.open_url('/group/edit/{0}'.format(group.id))\n el = self.wd.find_element(By.ID, \"name\")\n el.clear()\n el.send_keys(new_name)\n self.submit_form(\"group_form\")\n self.assert_form_error(\"Group \\\"{0}\\\" already exists.\".format(new_name))",
"def testUseAltNamingOne(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex-rc/' +\n '0.12.433.269', '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 1)\n self.assertEqual(expected, actual)",
"def test_update_port_group(self):\n pass",
"def test_break_security_group_usual_case_specify_sg():",
"def setCaptainName(self, empireID, id, name):\n try:\n myCaptain = self.captains[id]\n if myCaptain.empireID <> empireID:\n return 'cannot set captain name not owned by player requesting change'\n # make sure no other captain shares same name\n for captainID, otherCaptain in self.captains.iteritems():\n if otherCaptain.name == name and otherCaptain.id <> id:\n return 'Another captain already has the name:%s' % name\n myCaptain.setMyName(name)\n return 1\n except:\n return 'error-> could not set captain name'",
"def test_change_name_of_the_devicefalse():",
"def test_change_name_of_the_devicetrue():",
"def testUseAltNamingTwo(self):\n mod_prefix = '/'.join(IMAGE_SERVER_PREFIX.split('/')[:-1])\n expected = (mod_prefix + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 2)\n self.assertEqual(expected, actual)",
"def test_cleans_peer_name(self):\n ps1_status = self.pybird.get_peer_status(\"PS1{\\\"'}\")\n self.assertFalse(ps1_status['up'])",
"def setNetGroup(addr): #status: Done, not tested\r\n pass",
"def test_update_group(self):\n pass"
]
| [
"0.73724425",
"0.63812697",
"0.60749346",
"0.5786616",
"0.5737494",
"0.5689937",
"0.558545",
"0.556925",
"0.5448637",
"0.5431592",
"0.5371331",
"0.5291523",
"0.52891064",
"0.5282398",
"0.52451235",
"0.523351",
"0.52235484",
"0.52166736",
"0.5215772",
"0.5207367",
"0.52060276",
"0.5187579",
"0.5185397",
"0.5177662",
"0.51736087",
"0.51589626",
"0.5153398",
"0.5151467",
"0.5147766",
"0.5147227"
]
| 0.8209844 | 0 |
Parse a C constant value in decimal or hex. Return None if the input isn't a constant. | def convert_value(value):
# print "Attempting to match value: %s" % value
m = re.match('^(0(x|X)[0-9a-fA-F]+|[0-9]+)$', value)
if m is None:
return None
value = m.group(1)
if value.startswith("0x") or value.startswith("0X"):
return int(value[2:], 16)
else:
return int(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_constant(name, libgmt):\n c_get_enum = libgmt.GMT_Get_Enum\n c_get_enum.argtypes = [ctypes.c_char_p]\n c_get_enum.restype = ctypes.c_int\n\n value = c_get_enum(name.encode())\n\n if value is None or value == -99999:\n raise GMTCLibError(\n \"Constant '{}' doesn't exits in libgmt.\".format(name))\n\n return value",
"def get_scalar_from_constant(expr):\n assert isinstance(expr, _expr.Constant) and not expr.data.shape, \\\n \"Expr is not a constant scalar.\"\n value = expr.data.asnumpy()\n if value.dtype == np.dtype(np.int32):\n return int(value)\n if value.dtype == np.dtype(np.float32):\n return float(value)\n assert False, \"Constant expr must be float32/int32\"\n return None # To suppress pylint",
"def t_CCONST(t):\n return t",
"def extractVal(value):\n assert value is not None, \"Value is None\"\n \n trimmed = value.strip()\n try:\n return int(trimmed)\n except ValueError:\n try:\n return float(trimmed)\n except ValueError:\n return str(trimmed)",
"def character_constant(self):\n col = self.pos\n try:\n self.match('\\'')\n\n # A character constant may be an escaped sequence\n # We assume a single alpha-numerical character or space\n if self.read() == '\\\\' and self.read(2).isprintable():\n value = self.read(2)\n self.pos += 2\n elif self.read().isprintable():\n value = self.read()\n self.pos += 1\n else:\n raise TokenError(\"Expected character.\")\n\n self.match('\\'')\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid character constant.\")\n\n constant = CharacterConstant(self.line, col, self.prev_white, value)\n return constant",
"def _rval(self, s):\n if common.is_num(s):\n return float(s)\n elif s.startswith('#'):\n return self.parent.constants[s[1:].lower()]\n else: # time-based ycomp code\n return s.lower()",
"def get_constant(v):\r\n if isinstance(v, Variable):\r\n try:\r\n return get_scalar_constant_value(v)\r\n except NotScalarConstantError:\r\n return None\r\n else:\r\n return v",
"def const(value) -> core.Const:\n return core.Const(value)",
"def const(string):\n\n\tconstparams = dict({\n\t\t\t\t\t'kb' : 1.3806e-23,\t# Boltzmann's constant\n\t\t\t\t\t'hbar' : 1.054e-34,\t# Planck's constant\n\t\t\t\t\t'topeta' : 1e15,\t# To peta-\n\t\t\t\t\t'totera' : 1e12,\t# To tera-\n\t\t\t\t\t'togiga' : 1e9,\t# To giga-\n\t\t\t\t\t'tomega' : 1e6,\t# To mega-\n\t\t\t\t\t'tokilo' : 1e3,\t# To kilo-\n\t\t\t\t\t'tocenti' : 1e-2,\t# To centi-\n\t\t\t\t\t'tomilli' : 1e-3,\t# To milli-\n\t\t\t\t\t'tomicro' : 1e-6,\t# To micro-\n\t\t\t\t\t'tonano' : 1e-9,\t# To nano-\n\t\t\t\t\t'topico' : 1e-12,\t# To pico-\n\t\t\t\t\t'tofemto' : 1e-15,\t# To femto-\n\t\t\t\t\t})\n\n\ttry:\n\t\treturn constparams[string]\n\texcept KeyError, e:\n\t\tprint \"KeyError: %s is not a valid key for ntpy.param.const().\" % e\n\t\traise",
"def constant(constant_value: float, value: float):\n\t\treturn constant_value",
"def _deduceValueType(value):\n\n\tif value.lower() == 'null':\n\t\treturn None\n\n\tif value.startswith(\"0x\"):\n\t\treturn bytes.fromhex(value[2:])\n\n\t# If value can be an int, float() will not raise\n\t# exception too\n\tresult = value\n\ttry:\n\t\tresult = float(value)\n\t\tresult = int(value)\n\texcept:\n\t\tpass\n\n\treturn result",
"def parse_value(self, tokens):\n\n value = 0\n while tokens:\n value |= self._parse_constant(tokens)\n if not tokens or tokens[0].type != 'BITWISE_OR':\n break\n tokens.pop(0)\n else:\n self._parser_state.error('empty constant')\n return value",
"def number(self):\n col = self.pos\n try:\n chars = []\n\n # Match optional period\n if self.read() == \".\":\n chars.append(self.read())\n self.pos += 1\n\n # Match required decimal digit\n if self.read().isdigit():\n chars.append(self.read())\n self.pos += 1\n else:\n raise TokenError(\"Expected digit.\")\n\n # Match any sequence of letters, digits, underscores,\n # periods and exponents\n exponents = [\"e+\", \"e-\", \"E+\", \"E-\", \"p+\", \"p-\", \"P+\", \"P-\"]\n while not self.eos():\n if self.read(2) in exponents:\n chars.append(self.read(2))\n self.pos += 2\n elif self.read().isalpha() or self.read().isdigit() or self.read() in [\"_\", \".\"]:\n chars.append(self.read())\n self.pos += 1\n else:\n break\n\n value = \"\".join(chars)\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid preprocessing number.\")\n\n constant = NumericalConstant(self.line, col, self.prev_white, value)\n return constant",
"def test_single_const(self):\n conf = {'electron_density.type': 'constant',\n 'electron_density.value': '2.6'}\n\n self.parser.parse(conf)\n density = self.parser.parameters['electron_density']\n\n self.assertIsInstance(density, Constant)\n self.assertEqual(density([0.2, 1.0, 0.4]), 2.6)",
"def handle_value(value):\n\n if value[-1] == 'x':\n return float(value[0:-1])\n\n if value[-1] == '%':\n return float(value[0:-1])\n\n if value[0].isdigit():\n return bytify(value)\n\n raise ValueError",
"def test_cime_constants(e3sm_tag='master'):\n\n resp = requests.get(\n f'https://raw.githubusercontent.com/E3SM-Project/E3SM/{e3sm_tag}/'\n f'share/util/shr_const_mod.F90')\n\n text = resp.text\n\n text = text.split('\\n')\n\n found = {}\n for constant in constants:\n found[constant] = False\n\n for line in text:\n constant, value = _parse_value(line)\n if constant is None:\n continue\n print(f'line: {line}')\n print(f'parsed: {constant} = {value}')\n if constant in constants:\n if isinstance(value, float):\n print('verifying {}'.format(constant))\n assert value == constants[constant]\n else:\n print('skipping verification for {}'.format(constant))\n\n found[constant] = True\n else:\n print('not in constants')\n\n print('')\n\n all_found = True\n for constant in found:\n if not found[constant]:\n print('{} was not found!'.format(constant))\n all_found = False\n\n assert all_found",
"def get_node_value(node: Node):\n if node.type != 'Const':\n raise Exception('Can\\'t get value for non-constant node {}'.format(node.name))\n return node.value",
"def _value(token):\n result = re.match(r'\\d*', '0' + token)\n return int(result.group(0))",
"def _parse(val: str):\n\n if not isinstance(val, str):\n raise TypeError(\"Method requires string input\")\n\n value = re.findall(r'^([-+]?\\d*\\.\\d*(?=\\s)|\\d+(?=\\s))', val)\n if not (value and val[:len(value[0])] == value[0]):\n return val, None\n\n # string starts with value\n value = value[0]\n val = val[len(value):]\n\n val = val.strip()\n if val:\n unit = val\n else:\n unit = 'dimensionless'\n\n return value, unit",
"def _parse_env_value(val):\n if val.lower() == \"false\":\n return False\n elif val.lower() == \"true\":\n return True\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n return val",
"def get_constant16(binary_addr):\n\n if binary_addr in expressions:\n return get_expression(binary_addr, memorymanager.get_u16_binary(binary_addr))\n return mainformatter.constant16(binary_addr)",
"def is_constant(s):\n return ((s[0] >= '0' and s[0] <= '9') or (s[0] >= 'a' and s[0] <= 'd')) and s.isalnum()",
"def _ConvertScalarFieldValue(value, field, path, require_str=False):\n try:\n if field.cpp_type in _INT_TYPES:\n return _ConvertInteger(value)\n elif field.cpp_type in _FLOAT_TYPES:\n return _ConvertFloat(value, field)\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:\n return _ConvertBool(value, require_str)\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:\n if field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n if isinstance(value, str):\n encoded = value.encode('utf-8')\n else:\n encoded = value\n # Add extra padding '='\n padded_value = encoded + b'=' * (4 - len(encoded) % 4)\n return base64.urlsafe_b64decode(padded_value)\n else:\n # Checking for unpaired surrogates appears to be unreliable,\n # depending on the specific Python version, so we check manually.\n if _UNPAIRED_SURROGATE_PATTERN.search(value):\n raise ParseError('Unpaired surrogate')\n return value\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:\n # Convert an enum value.\n enum_value = field.enum_type.values_by_name.get(value, None)\n if enum_value is None:\n try:\n number = int(value)\n enum_value = field.enum_type.values_by_number.get(number, None)\n except ValueError:\n raise ParseError('Invalid enum value {0} for enum type {1}'.format(\n value, field.enum_type.full_name))\n if enum_value is None:\n if field.file.syntax == 'proto3':\n # Proto3 accepts unknown enums.\n return number\n raise ParseError('Invalid enum value {0} for enum type {1}'.format(\n value, field.enum_type.full_name))\n return enum_value.number\n except ParseError as e:\n raise ParseError('{0} at {1}'.format(e, path))",
"def _getvalue_expr_Constant(self, expr: ast.Constant) -> Any:\n return expr.value",
"def parse_float(value):\n try:\n return float(value)\n except (ValueError, TypeError):\n return None",
"def value_convert(x):\n try:\n return x.decode(\"ascii\")\n except UnicodeDecodeError:\n return x.hex()",
"def parseColor(c):\n if c in baseColors:\n return baseColors[c]\n if len(c) == 6:\n return tuple(map(lambda x: int(x, 16), (c[:2], c[2:4], c[4:])))\n if len(c) == 3:\n return tuple(map(lambda x: 16*int(x, 16), c))\n raise ValueError(\"Can't find color '{}'\".format(c))",
"def evaluate_constant(expr: Any) -> Any:\n all_options = traverse_all_options(expr)\n if len(all_options) > 1:\n raise ValueError(f'{expr} is not evaluated to a constant. All possible values are: {all_options}')\n res = all_options[0]\n return res",
"def hex2value10(hex_str):\n return hex2int(hex_str) / 10.0",
"def try_parse_as_voltage_constant(self, line: str):\n tokens = line.split()\n if len(tokens) != 3: return False\n if tokens[1] != \"=\": return False\n\n value = tokens[2]\n value = self.remove_ending(value, \"v\")\n value = self.parse_float_value(value)\n\n self.voltage_constants[tokens[0]] = value\n\n return True"
]
| [
"0.5632588",
"0.5586047",
"0.5533818",
"0.55302715",
"0.5484222",
"0.5482968",
"0.54275924",
"0.5395987",
"0.53449136",
"0.5325055",
"0.52999747",
"0.5280379",
"0.5259344",
"0.52487993",
"0.52158546",
"0.519826",
"0.5152143",
"0.51404864",
"0.51237184",
"0.51235414",
"0.51213604",
"0.5120128",
"0.51169986",
"0.5095728",
"0.50948894",
"0.50855166",
"0.5079269",
"0.5073661",
"0.5070811",
"0.50431246"
]
| 0.56358886 | 0 |
This method is called when the handler should emit the record. By default, SocketHandler will silently drop a message if it cannot send it. Because this is not desired in our case, we will use a queue that will act as a buffer if the message is not sent. | def emit(self, record):
self.buffer.append(record)
while len(self.buffer) != 0:
nextRecord = self.buffer.popleft()
super().emit(nextRecord)
if self.sock is None: # If we failed to send the record
self.buffer.appendleft(nextRecord)
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def emit(self, record):\n try:\n # Format: [ [queueMsgID, PID], record ]\n self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])\n except:\n # Something went wrong...\n self.handleError(record)",
"def handle_write(self):\n #send_types = \" + \".join(\n # messages.get_message_type(message) for message in self.buffer)\n for message in self.buffer:\n if isinstance(message, str):\n self.send(message)\n else:\n self.send(message.pack())\n self.buffer = []\n #print \"To %s:%s sent: \" % (self.address, self.port), send_types",
"def emit(self, record):\r\n try:\r\n self.enqueue(self.prepare(record))\r\n except Exception:\r\n self.handleError(record)",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)",
"def emit(self, record):\n email_log = EMAIL_TIMES[self.level]\n email_backlog = EMAIL_BACKLOG[self.level]\n now = time.time()\n\n # Get the time of the oldest email\n oldest_email_time = min(email_log)\n # If the oldest email was sent more than throttle time ago, allow this\n # one through\n if oldest_email_time < (now - EMAIL_THROTTLE_TIME):\n email_log.append(now)\n # If there is a backlog, add it to the message before sending\n if len(email_backlog) > 0:\n backlog = '\\n'.join(email_backlog)\n # Explicitely convert record.msg to str to allow for\n # logging.exception() with exception as arg instead of msg\n record.msg = str(record.msg) + '\\n\\nBacklog:\\n' + backlog\n email_backlog.clear()\n\n super(CustomSMTPHandler, self).emit(record)\n else:\n email_backlog.append(self.formatter.format(record))",
"def emit(self, record):\n try:\n e_inf = record.exc_info\n if e_inf:\n # side-effect: sets record.exc_text\n dummy = self.format(record)\n record.exc_info = None\n # pylint: disable=W0104\n # Statement seems to have no effect\n dummy # pflakes ;/\n record.handle = (self.session.handle\n if self.session is not None else None)\n if self.session is not None:\n self.session.lock.acquire()\n self.oqueue.send(('logger', record))\n if self.session is not None:\n self.session.lock.release()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)",
"def emit(self, record):\n try:\n msg = self.format(record)\n stream = self.stream\n stream.Write_shared((msg + self.terminator).encode(self.encoding))\n # self.flush()\n except Exception:\n self.handleError(record)",
"def emit(self, record: LogRecord):\n try:\n self.enqueue(self.prepare(record))\n except Exception:\n self.handleError(record)",
"def flush_record(self):\n self.__mux.acquire()\n try:\n if self.datatype == 'csv':\n self.__csv_flush_record()\n elif self.datatype == 'json':\n self.__json_flush_record()\n elif self.datatype == 'all':\n self.__csv_flush_record(all=True)\n self.__json_flush_record(all=True)\n except Exception as e:\n log.debug(\"data_writer: Failed to flush queue: {0}: {1}\".format(str(e), [traceback.format_exc()]))\n finally:\n try:\n if sys.version_info[0] < 3:\n del self.__queue[:]\n else:\n self.__queue.clear()\n except Exception as e:\n log.error(\"data_writer: Failed to clear datawriter queue: {0}\".format(str(e)))\n self.__mux.release()",
"def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")",
"def handle_write(self):\n self.initiate_send()",
"def flush(self):\n self._pending_flush = False\n\n if self.handler is None or not self.handler.active or not self.send_queue:\n return\n\n self.handler.send_pack('a[%s]' % self.send_queue)\n self.send_queue = ''",
"def emit(self, record):\n try:\n ei = record.exc_info\n if ei:\n dummy = self.format(record) # just to get traceback text into record.exc_text\n record.exc_info = None # not needed any more\n self.queue.put_nowait(record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n print(\"Uncaught exception - ignoring\")\n traceback.print_exc()\n self.handleError(record)",
"def enqueue(self, record):\r\n self.queue.put_nowait(record)",
"async def sender(self):\n out = await self.output_queue.get()\n if not out.ready():\n logger.info(\">>> Requeuing {}\".format(out))\n await self.output_queue.put(out)\n await asyncio.sleep(0.05)\n return\n if out.expired():\n logger.info(\">>> Discarding {}\".format(out))\n out.discarded = True\n return\n content = [out.content] if type(out.content) is str else out.content\n logger.info(\">>> Sending:\\n{}\".format(content))\n await self.websocket.send(json.dumps(content))\n out.sent = True\n await asyncio.sleep(len(content) * 0.5)",
"def send_blocking_signal(self, compression=True):\n while not self._stop_receive.is_set():\n if len(self._send_queue) > 0:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(self._send_queue.pop()), compression)\n else:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(DummyEvent()), compression)\n time.sleep(1)",
"def send_emission(self):\n if self._emit_queue.empty():\n return\n emit = self._emit_queue.get()\n emit()",
"def emit(self, record):\n self.buffer.append(record.__dict__)",
"def emit(self, record):\n self.buffer.append(record.__dict__)",
"def write_handler(socket, buf):\n while True:\n try:\n message = buf.pop()\n logging.debug(\"sending data : %s\", message)\n socket.send(message)\n except IndexError:\n time.sleep(WAIT_INTERVAL)",
"def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)",
"def add_to_send_queue(self, data):\n if self.socket is not None:\n self.send_queue.put(data)",
"def emit(self, record):\n try:\n topic, record.msg = record.msg.split(TOPIC_DELIM,1)\n except Exception:\n topic = \"\"\n try:\n bmsg = cast_bytes(self.format(record))\n except Exception:\n self.handleError(record)\n return\n \n if isinstance(topic, str):\n btopic = cast_bytes(topic)\n else:\n print(\"Exception: topic is not string:{topic}\".format(topic=topic))\n btopic = b'Debug' \n\n self.socket.send_multipart([btopic, bmsg])",
"def emit(self, record):\n\t\ttry:\n\t\t\tmsg = self.format(record).encode('utf-8')\n\n\t\t\tif self._write_ready:\n\t\t\t\ttry:\n\t\t\t\t\tself._socket.sendall(msg)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"Error when writing to syslog '{}'\".format(self._address), e, file=sys.stderr)\n\t\t\t\t\tself._enqueue(msg)\n\n\t\t\telse:\n\t\t\t\tself._enqueue(msg)\n\n\n\t\texcept Exception as e:\n\t\t\tprint(\"Error when emit to syslog '{}'\".format(self._address), e, file=sys.stderr)\n\t\t\tself.handleError(record)",
"def handle_write(self):\n # without overriding this we would get an \"unhandled write event\"\n # message from asyncore once connection occurs.",
"def write(self, record):\n if not record:\n return\n\n # Convert to a dict - inefficient, I know...\n if type(record) is DASRecord:\n record = json.loads(record.as_json())\n if type(record) is dict:\n # If our local queue is full, throw away the oldest entries\n while self.send_queue.full():\n try:\n logging.debug('CachedDataWriter queue full - dropping oldest...')\n self.send_queue.get_nowait()\n except asyncio.QueueEmpty:\n logging.warning('CachedDataWriter queue is both full and empty?!?')\n\n # Enqueue our latest record for send\n self.send_queue.put_nowait(record)\n else:\n logging.warning('CachedDataWriter got non-dict/DASRecord object of '\n 'type %s: %s', type(record), str(record))",
"def _send(self, message):\n logger.info(message)\n self.buffer.put(message)"
]
| [
"0.6777097",
"0.67079544",
"0.6660982",
"0.66555536",
"0.66555536",
"0.66555536",
"0.6519431",
"0.6481583",
"0.6451602",
"0.64207643",
"0.64196646",
"0.64065015",
"0.6357897",
"0.6326828",
"0.62853485",
"0.6271823",
"0.62639964",
"0.6251536",
"0.6215217",
"0.61984706",
"0.61949575",
"0.61949575",
"0.61913216",
"0.61857957",
"0.6129803",
"0.61181664",
"0.6096937",
"0.6091398",
"0.6080871",
"0.60611045"
]
| 0.7365688 | 0 |
Test case for rest_v20_dd_systems_systemid_stats_capacity_get | def test_rest_v20_dd_systems_systemid_stats_capacity_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_capacity(self, meta, raven_vars, dispatch, t, raw=False):\n return self.get_interaction().get_capacity(meta, raven_vars, dispatch, t, raw=raw)",
"def get_capacity():\n fs.get_capacity()",
"def test_get_cluster_capacity_monthly_resolution(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n query_data = [{\"row\": 1}]\n query_data, total_capacity = handler.get_cluster_capacity(query_data)\n self.assertTrue(\"capacity\" in total_capacity)\n self.assertTrue(isinstance(total_capacity[\"capacity\"], Decimal))\n self.assertTrue(\"capacity\" in query_data[0])\n self.assertIsNotNone(query_data[0].get(\"capacity\"))\n self.assertIsNotNone(total_capacity.get(\"capacity\"))\n self.assertEqual(query_data[0].get(\"capacity\"), total_capacity.get(\"capacity\"))",
"def test_get_cluster_capacity_daily_resolution(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=daily\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n query_data = handler.execute_query()\n\n daily_capacity = defaultdict(Decimal)\n total_capacity = Decimal(0)\n query_filter = handler.query_filter\n query_group_by = [\"usage_start\", \"cluster_id\"]\n annotations = {\"capacity\": Max(\"cluster_capacity_cpu_core_hours\")}\n cap_key = list(annotations.keys())[0]\n\n q_table = handler._mapper.provider_map.get(\"tables\").get(\"query\")\n query = q_table.objects.filter(query_filter)\n\n with tenant_context(self.tenant):\n cap_data = query.values(*query_group_by).annotate(**annotations)\n for entry in cap_data:\n date = handler.date_to_string(entry.get(\"usage_start\"))\n daily_capacity[date] += entry.get(cap_key, 0)\n cap_data = query.values(*query_group_by).annotate(**annotations)\n for entry in cap_data:\n total_capacity += entry.get(cap_key, 0)\n\n self.assertEqual(query_data.get(\"total\", {}).get(\"capacity\", {}).get(\"value\"), total_capacity)\n for entry in query_data.get(\"data\", []):\n date = entry.get(\"date\")\n values = entry.get(\"values\")\n if values:\n capacity = values[0].get(\"capacity\", {}).get(\"value\")\n self.assertEqual(capacity, daily_capacity[date])",
"def get_capacity_param(self):\n intr = self.get_interaction()\n return intr.get_capacity(None, None, None, None, raw=True)",
"def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)",
"def system_capacity_ac(self):\n return (self.sam_sys_inputs['system_capacity']\n / self.sam_sys_inputs['dc_ac_ratio'])",
"def get_capacity_var(self):\n return self._capacity_var",
"def get_cluster_capacity_info(cluster_id):\n cpu_capacity_info = get_node_data(cluster_id)\n cpu_capacity_in_cores = round(unit_conversion(sum([int(''.join(filter(\n str.isdigit, str(item['status']['allocatable']['cpu'])))) for item in cpu_capacity_info]), 'm'), 2)\n memory_capacity_in_gib = round(sum(\n [unit_conversion(int(''.join(filter(str.isdigit, str(item['status']['allocatable']['memory'])))),\n ''.join(filter(str.isalpha, str(item['status']['allocatable']['memory']))))\n for item in cpu_capacity_info]), 2)\n return {'cpu': cpu_capacity_in_cores, 'memory': memory_capacity_in_gib}",
"def get_capacity():\n\n try:\n raw_capacity = PLIST[\"TotalSize\"]\n raw_capacity = str(raw_capacity)\n\n except KeyError:\n return \"Unknown\", \"Unknown\"\n\n #Round the sizes to make them human-readable.\n unit_list = [None, \"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n unit = \"B\"\n human_readable_size = int(raw_capacity)\n\n try:\n while len(str(human_readable_size)) > 3:\n #Shift up one unit.\n unit = unit_list[unit_list.index(unit)+1]\n human_readable_size = human_readable_size//1000\n\n except IndexError:\n return \"Unknown\", \"Unknown\"\n\n #Include the unit in the result for both exact and human-readable sizes.\n return raw_capacity, str(human_readable_size)+\" \"+unit",
"def test_get_cluster_capacity_monthly_resolution_group_by_cluster(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[cluster]=*\" # noqa: E501\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n query_data = handler.execute_query()\n\n capacity_by_cluster = defaultdict(Decimal)\n total_capacity = Decimal(0)\n query_filter = handler.query_filter\n query_group_by = [\"usage_start\", \"cluster_id\"]\n annotations = {\"capacity\": Max(\"cluster_capacity_cpu_core_hours\")}\n cap_key = list(annotations.keys())[0]\n\n q_table = handler._mapper.provider_map.get(\"tables\").get(\"query\")\n query = q_table.objects.filter(query_filter)\n\n with tenant_context(self.tenant):\n cap_data = query.values(*query_group_by).annotate(**annotations)\n for entry in cap_data:\n cluster_id = entry.get(\"cluster_id\", \"\")\n capacity_by_cluster[cluster_id] += entry.get(cap_key, 0)\n total_capacity += entry.get(cap_key, 0)\n\n for entry in query_data.get(\"data\", []):\n for cluster in entry.get(\"clusters\", []):\n cluster_name = cluster.get(\"cluster\", \"\")\n capacity = cluster.get(\"values\")[0].get(\"capacity\", {}).get(\"value\")\n self.assertEqual(capacity, capacity_by_cluster[cluster_name])\n\n self.assertEqual(query_data.get(\"total\", {}).get(\"capacity\", {}).get(\"value\"), total_capacity)",
"def test_get_cluster_capacity_daily_resolution_group_by_clusters(self):\n url = (\n \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=daily&group_by[cluster]=*\"\n )\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n query_data = handler.execute_query()\n\n daily_capacity_by_cluster = defaultdict(dict)\n total_capacity = Decimal(0)\n query_filter = handler.query_filter\n query_group_by = [\"usage_start\", \"cluster_id\"]\n annotations = {\"capacity\": Max(\"cluster_capacity_cpu_core_hours\")}\n cap_key = list(annotations.keys())[0]\n\n q_table = handler._mapper.query_table\n query = q_table.objects.filter(query_filter)\n\n with tenant_context(self.tenant):\n cap_data = query.values(*query_group_by).annotate(**annotations)\n for entry in cap_data:\n date = handler.date_to_string(entry.get(\"usage_start\"))\n cluster_id = entry.get(\"cluster_id\", \"\")\n if cluster_id in daily_capacity_by_cluster[date]:\n daily_capacity_by_cluster[date][cluster_id] += entry.get(cap_key, 0)\n else:\n daily_capacity_by_cluster[date][cluster_id] = entry.get(cap_key, 0)\n total_capacity += entry.get(cap_key, 0)\n\n for entry in query_data.get(\"data\", []):\n date = entry.get(\"date\")\n for cluster in entry.get(\"clusters\", []):\n cluster_name = cluster.get(\"cluster\", \"\")\n capacity = cluster.get(\"values\")[0].get(\"capacity\", {}).get(\"value\")\n self.assertEqual(capacity, daily_capacity_by_cluster[date][cluster_name])\n\n self.assertEqual(query_data.get(\"total\", {}).get(\"capacity\", {}).get(\"value\"), total_capacity)",
"def get_capacity(self, meta, raven_vars, dispatch, t, raw=False):\n if raw:\n return self._capacity\n request = {self._capacity_var: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': raven_vars,\n 'dispatch': dispatch,\n 't': t}\n evaluated, meta = self._capacity.evaluate(inputs, target_var=self._capacity_var)\n return evaluated, meta",
"def get_capacity_var(self):\n return self.get_interaction().get_capacity_var()",
"def get_cluster_capacity(self, query_data): # noqa: C901\n annotations = self._mapper.report_type_map.get(\"capacity_aggregate\")\n if not annotations:\n return query_data, {}\n\n cap_key = list(annotations.keys())[0]\n total_capacity = Decimal(0)\n daily_total_capacity = defaultdict(Decimal)\n capacity_by_cluster = defaultdict(Decimal)\n daily_capacity_by_cluster = defaultdict(lambda: defaultdict(Decimal))\n\n q_table = self._mapper.query_table\n query = q_table.objects.filter(self.query_filter)\n query_group_by = [\"usage_start\", \"cluster_id\"]\n\n with tenant_context(self.tenant):\n cap_data = query.values(*query_group_by).annotate(**annotations)\n for entry in cap_data:\n cluster_id = entry.get(\"cluster_id\", \"\")\n usage_start = entry.get(\"usage_start\", \"\")\n if isinstance(usage_start, datetime.date):\n usage_start = usage_start.isoformat()\n cap_value = entry.get(cap_key, 0)\n if cap_value is None:\n cap_value = 0\n capacity_by_cluster[cluster_id] += cap_value\n daily_capacity_by_cluster[usage_start][cluster_id] = cap_value\n daily_total_capacity[usage_start] += cap_value\n total_capacity += cap_value\n\n if self.resolution == \"daily\":\n for row in query_data:\n cluster_id = row.get(\"cluster\")\n date = row.get(\"date\")\n if cluster_id:\n row[cap_key] = daily_capacity_by_cluster.get(date, {}).get(cluster_id, Decimal(0))\n else:\n row[cap_key] = daily_total_capacity.get(date, Decimal(0))\n elif self.resolution == \"monthly\":\n for row in query_data:\n cluster_id = row.get(\"cluster\")\n if cluster_id:\n row[cap_key] = capacity_by_cluster.get(cluster_id, Decimal(0))\n else:\n row[cap_key] = total_capacity\n\n return query_data, {cap_key: total_capacity}",
"def test_read_cluster_resource_quota(self):\n pass",
"def get_used_capacity(self,tot=\"50\"):\n data=self.at_cmd(\"CPMS?\")\n index=data[1].find(tot)-1\n if data[1][index-1]==',':\n return data[index]\n else:\n return data[1][index-1:index]",
"def test_list_cluster_resource_quota(self):\n pass",
"def test_read_cluster_resource_quota_status(self):\n pass",
"def get_num_slots(self):\n # Your code here\n return self.capacity",
"def Capacity(self) -> int:",
"def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")",
"def _get_capacity_info(self, nfs_share):\n nms = self.share2nms[nfs_share]\n ns_volume, ns_folder = self._get_share_datasets(nfs_share)\n folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,\n ns_folder),\n 'used|available')\n free = utils.str2size(folder_props['available'])\n allocated = utils.str2size(folder_props['used'])\n self.shares_with_capacities[nfs_share] = {\n 'free': utils.str2gib_size(free),\n 'total': utils.str2gib_size(free + allocated)}\n return free + allocated, free, allocated",
"def sys_service_memory():\n sort_cmd = [\"sort\", \"-k\", \"2nr\"]\n\n p_table = prettytable.PrettyTable(\n ['Service',\n 'Resident Set Size (MiB)',\n ], caching=False)\n p_table.align = 'l'\n p_table.align['Resident Set Size (MiB)'] = 'r'\n\n try:\n output = pipe_command(GREP_CMD, AWK_CMD, sort_cmd,\n cwd=MEMPATH + \"system.slice\")\n LOG.debug(\n 'command: %s\\n%s',\n ' '.join(GREP_CMD + [MEMPATH] + AWK_CMD + sort_cmd), output)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n for line in output.split(\"\\n\"):\n service = line.split(\"memory.stat:total_rss \")[0]\n rss_mem = line.split(\"memory.stat:total_rss \")[-1]\n p_table.add_row(\n [service,\n mem_to_mebibytes(rss_mem),\n ])\n\n # Delete first row wich display total system.slice rss\n p_table.del_row(0)\n return p_table",
"def test_techs_energy_capacity_systemwide_constraint(self, bound):\n\n def check_bounds(constraint):\n assert po.value(constraint.upper) == 20\n if bound == \"equals\":\n assert po.value(constraint.lower) == 20\n if bound == \"max\":\n assert po.value(constraint.lower) is None\n\n m = build_model(\n {f\"techs.test_supply_elec.constraints.energy_cap_{bound}_systemwide\": 20},\n \"simple_supply,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"energy_capacity_systemwide_constraint\")\n assert (\n \"test_supply_elec\"\n in m._backend_model.energy_capacity_systemwide_constraint.keys()\n )\n check_bounds(\n m._backend_model.energy_capacity_systemwide_constraint[\"test_supply_elec\"]\n )\n\n # Check that a model without transmission techs doesn't cause an error\n m = build_model(\n {f\"techs.test_supply_elec.constraints.energy_cap_{bound}_systemwide\": 20},\n \"simple_supply,two_hours,investment_costs\",\n model_file=\"model_minimal.yaml\",\n )\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"energy_capacity_systemwide_constraint\")\n check_bounds(\n m._backend_model.energy_capacity_systemwide_constraint[\"test_supply_elec\"]\n )",
"def _get_host_utilization(context, host, ram_mb, disk_gb):\n instances = instance_get_all_by_host(context, host)\n vms = len(instances)\n free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb\n free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb * 1024)\n\n work = 0\n for instance in instances:\n free_ram_mb -= instance.memory_mb\n free_disk_gb -= instance.root_gb\n free_disk_gb -= instance.ephemeral_gb\n if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING,\n vm_states.MIGRATING, vm_states.RESIZING]:\n work += 1\n return dict(free_ram_mb=free_ram_mb,\n free_disk_gb=free_disk_gb,\n current_workload=work,\n running_vms=vms)",
"def capacitygroup_group():",
"def test_loc_techs_storage_capacity_constraint(self):\n m = build_model({}, \"simple_storage,two_hours,investment_costs\")\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"storage_max_constraint\")\n\n m = build_model({}, \"simple_supply_and_supply_plus,two_hours,investment_costs\")\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"storage_max_constraint\")\n\n m = build_model(\n {\"techs.test_storage.constraints.storage_cap_equals\": 20},\n \"simple_storage,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n assert m._backend_model.storage_cap[\"a\", \"test_storage\"].ub == 20\n assert m._backend_model.storage_cap[\"a\", \"test_storage\"].lb == 20",
"def capacity_reports():\n return render_template('capacity.html')",
"def test_capacity(self, space_each_type):\n tspace = build_required_space(space_each_type, type_requirement=\"real\")\n assert tspace.cardinality == numpy.inf\n\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=2)\n space.register(dim)\n dim = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim)\n tspace = build_required_space(space, type_requirement=\"integer\")\n assert tspace.cardinality == (4**2) * (6 + 1)\n\n dim = Integer(\"yolo3\", \"uniform\", -3, 6, shape=(2, 1))\n space.register(dim)\n tspace = build_required_space(space, type_requirement=\"integer\")\n assert tspace.cardinality == (4**2) * (6 + 1) * ((6 + 1) ** (2 * 1))\n\n tspace = build_required_space(\n space, type_requirement=\"integer\", shape_requirement=\"flattened\"\n )\n assert tspace.cardinality == (4**2) * (6 + 1) * ((6 + 1) ** (2 * 1))\n\n tspace = build_required_space(\n space, type_requirement=\"integer\", dist_requirement=\"linear\"\n )\n assert tspace.cardinality == (4**2) * (6 + 1) * ((6 + 1) ** (2 * 1))"
]
| [
"0.6880177",
"0.6838495",
"0.67628455",
"0.6728308",
"0.66688097",
"0.6290439",
"0.62468123",
"0.61392134",
"0.60665524",
"0.6012225",
"0.59929657",
"0.5983629",
"0.5947236",
"0.5928955",
"0.5917592",
"0.59040964",
"0.5896049",
"0.5859963",
"0.58585066",
"0.5814054",
"0.57612604",
"0.57576805",
"0.57353795",
"0.5726745",
"0.5694757",
"0.5679303",
"0.5663503",
"0.56486136",
"0.5638382",
"0.5635187"
]
| 0.96901834 | 0 |
make node without archiving, create temp table, take full backup, check that temp table not present in backup catalogue | def test_exclude_temp_tables(self):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'shared_buffers': '1GB',
"fsync": "off", 'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
conn = node.connect()
with node.connect("postgres") as conn:
conn.execute("create temp table test as select generate_series(0,50050000)::text")
conn.commit()
temp_schema_name = conn.execute("SELECT nspname FROM pg_namespace WHERE oid = pg_my_temp_schema()")[0][0]
conn.commit()
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace("pg_", "")
conn.commit()
conn.execute("create index test_idx on test (generate_series)")
conn.commit()
heap_path = conn.execute("select pg_relation_filepath('test')")[0][0]
conn.commit()
index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0]
conn.commit()
heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
conn.commit()
toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
conn.commit()
toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid) + "_index"))[0][0]
conn.commit()
temp_table_filename = os.path.basename(heap_path)
temp_idx_filename = os.path.basename(index_path)
temp_toast_filename = os.path.basename(toast_path)
temp_idx_toast_filename = os.path.basename(toast_idx_path)
self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])
for root, dirs, files in os.walk(backup_dir):
for file in files:
if file in [temp_table_filename, temp_table_filename + ".1",
temp_idx_filename,
temp_idx_filename + ".1",
temp_toast_filename,
temp_toast_filename + ".1",
temp_idx_toast_filename,
temp_idx_toast_filename + ".1"]:
self.assertEqual(1, 0, "Found temp table file in backup catalogue.\n Filepath: {0}".format(file))
# Clean after yourself
self.del_test_dir(module_name, fname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_backup_compact(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_compact_validate()",
"def test_backup_restore_with_lesser_nodes(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.add_node(self.input.clusters[0][1].rest_username, self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].ip)\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate()",
"def test_exclude_unlogged_tables(self):\n fname = self.id().split('.')[3]\n backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')\n node = self.make_simple_node(base_dir=\"{0}/{1}/node\".format(module_name, fname),\n set_replication=True,\n initdb_params=['--data-checksums'],\n pg_options={'wal_level': 'replica', 'max_wal_senders': '2', \"shared_buffers\": \"1GB\", \"fsync\": \"off\", 'ptrack_enable': 'on'}\n )\n\n self.init_pb(backup_dir)\n self.add_instance(backup_dir, 'node', node)\n node.start()\n\n conn = node.connect()\n with node.connect(\"postgres\") as conn:\n\n conn.execute(\"create unlogged table test as select generate_series(0,50050000)::text\")\n conn.commit()\n\n conn.execute(\"create index test_idx on test (generate_series)\")\n conn.commit()\n\n heap_path = conn.execute(\"select pg_relation_filepath('test')\")[0][0]\n conn.commit()\n\n index_path = conn.execute(\"select pg_relation_filepath('test_idx')\")[0][0]\n conn.commit()\n index_init_path = index_path + \"_init\"\n\n heap_oid = conn.execute(\"select 'test'::regclass::oid\")[0][0]\n conn.commit()\n\n toast_path = conn.execute(\"select pg_relation_filepath('{0}.{1}')\".format(\"pg_toast\", \"pg_toast_\" + str(heap_oid)))[0][0]\n conn.commit()\n toast_init_path = toast_path + \"_init\"\n\n toast_idx_path = conn.execute(\"select pg_relation_filepath('{0}.{1}')\".format(\"pg_toast\", \"pg_toast_\" + str(heap_oid) + \"_index\"))[0][0]\n conn.commit()\n toast_index_idx_path = toast_idx_path + \"_init\"\n\n unlogged_heap_filename = os.path.basename(heap_path)\n unlogged_heap_init_filename = unlogged_heap_filename + \"_init\"\n\n unlogged_idx_filename = os.path.basename(index_path)\n unlogged_idx_init_filename = unlogged_idx_filename + \"_init\"\n\n unlogged_toast_filename = os.path.basename(toast_path)\n unlogged_toast_init_filename = unlogged_toast_filename + \"_init\"\n\n unlogged_idx_toast_filename = os.path.basename(toast_idx_path)\n unlogged_idx_toast_init_filename = unlogged_idx_toast_filename + \"_init\"\n\n self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])\n\n found_unlogged_heap_init = False\n found_unlogged_idx_init = False\n found_unlogged_toast = False\n found_unlogged_idx_toast_init = False\n for root, dirs, files in os.walk(backup_dir):\n for file in files:\n if file in [unlogged_heap_filename, unlogged_heap_filename + \".1\",\n unlogged_idx_filename,\n unlogged_idx_filename + \".1\",\n unlogged_toast_filename,\n unlogged_toast_filename + \".1\",\n unlogged_idx_toast_filename,\n unlogged_idx_toast_filename + \".1\"]:\n self.assertTrue(False, \"Found unlogged table file in backup catalogue.\\n Filepath: {0}\".format(file))\n\n if file == unlogged_heap_init_filename:\n found_unlogged_heap_init = True\n\n if file == unlogged_idx_init_filename:\n found_unlogged_idx_init = True\n\n if file == unlogged_toast_init_filename:\n found_unlogged_toast = True\n\n if file == unlogged_idx_toast_init_filename:\n found_unlogged_idx_toast_init = True\n\n self.assertTrue(found_unlogged_heap_init, \"{0} is not found in backup catalogue\".format(unlogged_heap_init_filename));\n self.assertTrue(found_unlogged_idx_init, \"{0} is not found in backup catalogue\".format(unlogged_idx_init_filename));\n self.assertTrue(found_unlogged_toast, \"{0} is not found in backup catalogue\".format(unlogged_toast_filename));\n self.assertTrue(found_unlogged_idx_toast_init, \"{0} is not found in backup catalogue\".format(unlogged_idx_toast_init_filename));\n\n # Clean after yourself\n 
self.del_test_dir(module_name, fname)",
"def test_backup_restore_with_warmup(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n NodeHelper.do_a_warm_up(self.backupset.cluster_host)\n self.sleep(30)\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n \"\"\" only membase bucket has warmup state \"\"\"\n if self.bucket_type == \"membase\":\n NodeHelper.wait_warmup_completed([self.backupset.cluster_host])",
"def test_backup_with_compress_flag(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backupset.backup_compressed = False\n self.backup_cluster()\n no_compression = self.get_database_file_info()\n self.log.info(\"\\nDelete old backup and do backup again with compress flag\")\n self.backup_create()\n self.backupset.backup_compressed = self.input.param(\"backup-compressed\", False)\n self.backup_cluster()\n with_compression = self.get_database_file_info()\n self.validate_backup_compressed_file(no_compression, with_compression)",
"def test_backup_restore_with_auto_compaction(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n rest = RestConnection(self.backupset.cluster_host)\n rest.set_auto_compaction(dbFragmentThresholdPercentage=80,\n dbFragmentThreshold=100,\n viewFragmntThresholdPercentage=80,\n viewFragmntThreshold=100,\n bucket=\"default\")\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")",
"def test_restore_from_compacted_backup(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_compact()\n self.backup_restore_validate()",
"def makeBlockTablePickle(config, db_config, start_time):\n\n try:\n temp_time = time.localtime()\n\n # make the connection string\n connection_string = 'postgresql://%s:%s@%s:%s/%s' %\\\n (db_config['db_user'], db_config['db_password'], \n db_config['db_host'], db_config['db_port'], db_config['db']) \n\n # build the query that will make the block table pickle\n engine = sal.create_engine(connection_string)\n sql_string = \"\"\"\n SELECT CAST(\"BLOCK_FIPS\" AS TEXT) as geoid{0}, \"ALAND{0}\", geom \n FROM {1}.nbm2_block_{2}\n \"\"\".format( config['census_vintage'][2:], db_config['db_schema'], \n config['census_vintage'])\n\n # load the data into a dataframe\n starting_crs={'init':'epsg:%s' % db_config['SRID']}\n block_df = gpd.read_postgis(sql_string, engine, crs=starting_crs)\n\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 6 OF 13 - READ IN BLOCK DATA TABLE\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n\n # write out the pickle\n temp_time = time.localtime()\n with open(config['temp_pickles']+'block_df.pkl','wb') as my_pickle:\n pickle.dump(block_df, my_pickle)\n\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 6 OF 13 - PICKLED OFF THE BLOCK_TABLE\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n \n block_df = None\n del block_df\n engine = None\n del engine\n my_pickle = None\n del my_pickle\n\n\n gc.collect()\n\n return True\n\n except:\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 6 OF 13 - COULD NOT PICKLE OFF THE \n BLOCK_TABLE\n \"\"\"\n my_message = ' '.join(my_message.split()) + '\\n' +\\\n traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n\n del block_df\n gc.collect()\n\n return False",
"def test_backup_restore_with_recreate(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n rest = RestConnection(self.backupset.cluster_host)\n rest.delete_bucket()\n bucket_name = \"default\"\n rest_helper = RestHelper(rest)\n rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)\n bucket_ready = rest_helper.vbucket_map_ready(bucket_name)\n if not bucket_ready:\n self.fail(\"Bucket {0} is not created after 120 seconds.\".format(bucket_name))\n self.log.info(\"Deleted {0} bucket and recreated it - restoring it now..\"\\\n .format(bucket_name))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")",
"def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )",
"def __makeBackup(self):\n pass #FIXME!!!",
"def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! \")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. 
Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()",
"def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def before_scenario(context, _):\n context.backup_rotation = br\n context.backup_root_raw = tempfile.TemporaryDirectory()\n context.backup_root = context.backup_root_raw.name\n context.created_files = {}\n\n logging.info(\"Creating %s\" , context.backup_root)\n\n for bucket in [\"yearly\", \"monthly\", \"daily\"]:\n os.mkdir(os.path.join(context.backup_root, bucket))\n context.created_files[bucket] = {\"backup\": [], \"miscellaneous\": []}",
"def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()",
"def test_backup_purge(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n old_backup_name = \"\"\n new_backup_name = \"\"\n backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n resume=self.backupset.resume, purge=self.backupset.purge,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.cluster_host)\n conn.kill_erlang()\n output = backup_result.result(timeout=200)\n self.log.info(str(output))\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n old_backup_name = bk_info[\"backups\"][i][\"date\"]\n self.log.info(\"Backup name before purge: \" + old_backup_name)\n conn.start_couchbase()\n conn.disconnect()\n self.sleep(30)\n output, error = self.backup_cluster()\n if error or not self._check_output(\"Backup completed successfully\", output):\n self.fail(output)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n new_backup_name = bk_info[\"backups\"][i][\"date\"]\n self.log.info(\"Backup name after purge: \" + new_backup_name)\n\n # Once the purge (and backup) have completed we shouldn't see any orphaned multipart uploads\n if self.objstore_provider:\n self.assertEqual(\n self.objstore_provider.num_multipart_uploads(), 0,\n \"Expected all multipart uploads to have been purged (all newly created ones should have also been completed)\"\n )\n\n self.assertNotEqual(old_backup_name, new_backup_name,\n \"Old backup name and new backup name are same when purge is used\")\n self.log.info(\"Old backup name and new backup name are not same when purge is used\")",
"def export_coin_nodes(timestamp, config, export_dir, is_merged_export):\n start = time.time()\n utils.create_folder_if_not_exists(export_dir)\n filename = datetime.fromtimestamp(timestamp).strftime('%Y%m%d-%H_%M')\n base_path = os.path.join(export_dir, \"{}\".format(filename))\n csv_path = base_path + \".csv\"\n txt_path = base_path + \".txt\"\n\n connection = sqlite3.connect(config['storage_file'], timeout=120)\n connection.row_factory = sqlite3.Row\n\n # Select all nodes that have been online at least once in the last 24 hours\n nodes_online_24h = connection.execute('SELECT * ' +\n 'FROM ' + config['coin_name'] + '_nodes ' +\n 'WHERE timestamp >= ?', [time.time() - 24 * 60 * 60])\n\n block_height_values = connection.execute('SELECT last_block ' +\n 'FROM ' + config['coin_name'] + '_nodes ' +\n 'WHERE timestamp = ?', [timestamp])\n block_height_values = map(lambda i: i['last_block'], block_height_values.fetchall())\n median_block_height = utils.median(block_height_values)\n\n # Turn nodes into a dict of node_address -> data\n nodes_online_24h_dict = {}\n for row in nodes_online_24h:\n nodes_online_24h_dict[row['node_address']] = dict(itertools.izip(row.keys(), row))\n\n for (interval_name, interval_seconds) in UPTIME_INTERVALS.items():\n calculate_node_uptime(connection, nodes_online_24h_dict, timestamp,\n interval_name, interval_seconds, config['coin_name'])\n\n with open(csv_path, 'a') as csv_file, open(txt_path, 'a') as txt_file:\n csv_writer = csv.writer(csv_file, delimiter=\",\", quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')\n txt_writer = csv.writer(txt_file, delimiter=\" \", quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')\n for node in nodes_online_24h_dict.values():\n output_data = [\n node['node_address'],\n node.get('uptime_two_hours', '100.00%'),\n node.get('uptime_eight_hours', '100.00%'),\n node.get('uptime_day', '100.00%'),\n node.get('uptime_seven_days', '100.00%'),\n node.get('uptime_thirty_days', '100.00%')]\n\n if not is_merged_export:\n output_data.append(node['last_block'])\n output_data.append(node['protocol_version'])\n output_data.append(node['client_version'])\n\n output_data.append(node['country_iso'])\n output_data.append(node['country_name'])\n output_data.append(node['city'])\n output_data.append(node['isp_cloud'])\n\n is_synced = abs(median_block_height - node['last_block']) <= \\\n config['max_block_height_difference']\n output_data.append(1 if is_synced else 0)\n\n if node['is_masternode'] is not None:\n output_data.append(node['is_masternode'])\n\n if is_synced or config['include_out_of_sync']:\n csv_writer.writerow(output_data)\n txt_writer.writerow(output_data)\n\n connection.close()\n logging.info(\"Export took %d seconds\", time.time() - start)\n logging.info(\"Wrote %s and %s\", csv_path, txt_path)\n\n return csv_path, txt_path",
"def main(dest_dir, db_host, db_port, db_name, db_schema, db_username, db_password, ssl_mode,\n force, cores, memory_per_core, default_partition_col, partition_col,\n nr_partitions):\n partition_col_dict = {k: v for k, v in partition_col}\n nr_partitions_dict = {k: v for k, v in nr_partitions}\n\n dest_dir_path = Path(dest_dir)\n dest_dir_path.mkdir(exist_ok=True, parents=True)\n\n db_params = PostgresDBParams(user=db_username, host=db_host, password=db_password,\n port=db_port, db=db_name, schema=db_schema, ssl_mode=ssl_mode)\n\n with PostgresDBConnectionWrapper(db_params) as db_wrapper:\n tables = db_wrapper.list_tables()\n\n spark_cfg = spark_wrapper.default_spark_config(cores, memory_per_core, use_utc=True)\n with spark_wrapper.create_spark_session_from_config(spark_cfg) as spark:\n dumper = PostgresTableDumper(db_params, spark)\n for t in tables:\n logging.info('Dumping table %s', t)\n\n tbl_path = Path(dest_dir_path, t)\n\n if not tbl_path.exists() and not force:\n default_col = None\n\n if default_partition_col:\n cols = db_wrapper.list_columns(t)\n if default_partition_col in cols:\n default_col = default_partition_col\n else:\n logging.warning(\n \"Default partition column %s not found among columns [%s]\",\n default_partition_col, ','.join(cols))\n\n p_col = partition_col_dict.get(t, default_col)\n nr_part = nr_partitions_dict.get(t, None)\n\n dumper.dump_table(t, tbl_path, p_col, nr_part)\n else:\n logging.info('Path %s already exists, not dumping table %s',\n tbl_path, t)\n\n counts_match = row_counts_match(tbl_path, t, db_wrapper, spark)\n\n if counts_match:\n logging.info(\"Counts for %s match\", t)\n else:\n logging.error(\"Counts for %s don't match\", t)",
"def store_reachable_nodes(nodes, timestamp):\n start = time.time()\n utils.create_folder_if_not_exists(os.path.dirname(CONF['storage_file']))\n connection = sqlite3.connect(CONF['storage_file'], timeout=120)\n\n connection.execute('CREATE TABLE IF NOT EXISTS ' + CONF['coin_name'] + '_nodes ' +\n '(id INTEGER PRIMARY KEY AUTOINCREMENT, ' +\n 'node_address TEXT NOT NULL, '\n 'timestamp INT NOT NULL, ' +\n 'last_block INT NOT NULL, ' +\n 'protocol_version INT NOT NULL, ' +\n 'client_version TEXT NOT NULL, ' +\n 'country_iso TEXT,' +\n 'country_name TEXT, ' +\n 'city TEXT, ' +\n 'isp_cloud TEXT, ' +\n 'is_masternode INT, ' +\n 'UNIQUE(node_address, timestamp) ON CONFLICT IGNORE)')\n\n if CONF.has_key('dash_insight_api'):\n dash_masternodes = get_dash_masternode_addresses()\n is_dash = True\n else:\n dash_masternodes = None\n is_dash = False\n\n insert_nodes = []\n for node in nodes:\n row = get_row(node)\n address = \"{}:{}\".format(row[0], row[1])\n is_masternode = address in dash_masternodes if is_dash else None\n insert_nodes.append([timestamp, row[6], address, row[2], row[3], row[9],\n row[10], row[13], row[15], is_masternode])\n\n connection.executemany('INSERT INTO ' + CONF['coin_name'] + '_nodes (' +\n 'timestamp, last_block, node_address, protocol_version, ' +\n 'client_version, country_iso, country_name, city, ' +\n 'isp_cloud, is_masternode) '\n 'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', insert_nodes)\n connection.commit()\n connection.close()\n logging.info(\"Store took %d seconds\", time.time() - start)",
"def test_backup_restore_with_ops(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n initial_gen = copy.deepcopy(gen)\n initial_keys = []\n for x in initial_gen:\n initial_keys.append(x[0])\n self.log.info(\"Start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.log.info(\"Create backup repo \")\n self.backup_create()\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n\n if self.compact_backup and self.ops_type == \"delete\":\n self.log.info(\"Start to compact backup \")\n self.backup_compact_validate()\n self.log.info(\"Validate deleted keys\")\n self.backup_compact_deleted_keys_validation(initial_keys)\n\n self.log.info(\"start restore cluster \")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n self.backupset.start = start\n self.backupset.end = end\n self._backup_restore_with_ops(backup=False, compare_function=\">=\")\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"",
"def time_step(self):\n\n self.reinitialize_backup_containers()\n\n super().time_step()\n\n self.make_a_backup_for_t()",
"def test_create_node_shutdown_item(self):\n pass",
"def test_backup_restore_sanity(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", self.expires)\n self.log.info(\"*** done to load items to all buckets\")\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.expected_error = self.input.param(\"expected_error\", None)\n if self.auto_failover:\n self.log.info(\"Enabling auto failover on \" + str(self.backupset.cluster_host))\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)\n self.backup_create_validate()\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.ops_type == \"update\":\n self.log.info(\"*** start to update items in all buckets\")\n self._load_all_buckets(self.master, gen, \"update\", self.expires)\n self.log.info(\"*** done update items in all buckets\")\n elif self.ops_type == \"delete\":\n self.log.info(\"*** start to delete items in all buckets\")\n self._load_all_buckets(self.master, gen, \"delete\", self.expires)\n self.log.info(\"*** done to delete items in all buckets\")\n self.sleep(10)\n self.log.info(\"*** start to validate backup cluster\")\n self.backup_cluster_validate()\n self.targetMaster = True\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.log.info(\"*** start to restore cluster\")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.reset_restore_cluster:\n self.log.info(\"\\n*** start to reset cluster\")\n self.backup_reset_clusters(self.cluster_to_restore)\n cmd_init = 'node-init'\n if self.same_cluster:\n self.log.info(\"Same cluster\")\n self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])\n if self.hostname and self.master.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.master.ip\n shell = RemoteMachineShellConnection(self.master)\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,\n options=options,\n cluster_host=\"localhost\",\n user=self.master.rest_username,\n password=self.master.rest_password)\n shell.disconnect()\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n else:\n self.log.info(\"Different cluster\")\n shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n shell.enable_diag_eval_on_non_local_hosts()\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.force_eject_node()\n rest.init_node()\n if self.hostname and self.backupset.restore_cluster_host.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,\n cluster_host=\"localhost\",\n user=self.backupset.restore_cluster_host.rest_username,\n password=self.backupset.restore_cluster_host.rest_password)\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n shell.disconnect()\n self.log.info(\"\\n*** Done reset cluster\")\n self.sleep(10)\n\n \"\"\" Add built-in user cbadminbucket to second cluster \"\"\"\n self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])\n\n self.backupset.start = start\n self.backupset.end = 
end\n self.log.info(\"*** start restore validation\")\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=\">=\",\n expected_error=self.expected_error)\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"",
"def test_restore_backup():",
"def version_create(self, node, hash, size, type, source, muser, uuid,\n checksum, cluster=0,\n update_statistics_ancestors_depth=None):\n\n q = (\"insert into versions (node, hash, size, type, source, mtime, \"\n \"muser, uuid, checksum, cluster) \"\n \"values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\")\n mtime = time()\n props = (node, hash, size, type, source, mtime, muser,\n uuid, checksum, cluster)\n serial = self.execute(q, props).lastrowid\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n self.nodes_set_latest_version(node, serial)\n\n return serial, mtime",
"def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)",
"def load_dump(self):\n # Create uuid extension\n command = \"CREATE EXTENSION IF NOT EXISTS \\\"uuid-ossp\\\";\"\n try:\n self.cursor.execute(command)\n except:\n # uuid extension already exists\n pass\n print(\"uuid extension couldn't be created\")\n\n path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'SQL', 'fa2909.sql')\n try:\n self.cursor.execute(open(path, \"r\").read())\n print('table was created successfully')\n return True\n except:\n # error\n print(\"table couldn't be created\")\n return False",
"def table_dump_query(table_name, path, rows_per_dump):\n return\"\"\"\n DEFINE TEMP-TABLE tt NO-UNDO LIKE %(table_name)s\n FIELD rec_id AS RECID\n FIELD epoch_time AS INT64.\n\n DEFINE VARIABLE epoch AS DATETIME NO-UNDO.\n DEFINE VARIABLE unixTime AS INT64 NO-UNDO.\n DEFINE VARIABLE htt AS HANDLE NO-UNDO.\n DEFINE VARIABLE cFileName AS CHARACTER NO-UNDO FORMAT \"x(60)\".\n DEFINE VARIABLE rowCount as INT64 NO-UNDO.\n\n epoch = DATETIME(1,1,1970,0,0,0,0).\n rowCount = 0.\n\n htt = TEMP-TABLE tt:HANDLE.\n\n FOR EACH platte.%(table_name)s NO-LOCK:\n IF rowCount = %(rows_per_dump)s THEN DO: \n unixTime = interval(NOW, epoch, \"milliseconds\").\n cFileName = \"%(path)s/t__%(table_name)s__e__\" + STRING(unixTime) + \"__insert.json\".\n htt:WRITE-JSON(\"FILE\", cFileName + \"_partial\", TRUE).\n OS-RENAME VALUE(cFileName + \"_partial\") VALUE(cFileName).\n rowCount = 0.\n EMPTY TEMP-TABLE tt.\n END.\n rowCount = rowCount + 1.\n CREATE tt.\n BUFFER-COPY %(table_name)s TO tt.\n tt.rec_id = RECID(%(table_name)s).\n unixTime = interval(NOW, epoch, \"milliseconds\").\n tt.epoch_time = unixTime.\n END.\n unixTime = interval(NOW, epoch, \"milliseconds\").\n cFileName = \"%(path)s/t__%(table_name)s__e__\" + STRING(unixTime) + \"__insert.json\".\n htt:WRITE-JSON(\"FILE\", cFileName + \"_partial\", TRUE).\n OS-RENAME VALUE(cFileName + \"_partial\") VALUE(cFileName)\n \n\"\"\" % {'path': path, 'table_name': table_name, 'rows_per_dump': rows_per_dump}",
"def test_backup_restore_after_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n rebalance.result()\n self.backup_cluster_validate()\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")",
"def recreate():\n drop()\n create()"
]
| [
"0.59866905",
"0.58030164",
"0.566614",
"0.55969876",
"0.5524487",
"0.55036587",
"0.5501728",
"0.54733956",
"0.5441421",
"0.53914344",
"0.53887075",
"0.5378152",
"0.53320277",
"0.5308021",
"0.5263021",
"0.5219191",
"0.5218431",
"0.52177876",
"0.5212545",
"0.5208883",
"0.5200093",
"0.51770574",
"0.5176457",
"0.51761466",
"0.5167201",
"0.51543176",
"0.51384807",
"0.5090144",
"0.5077433",
"0.50753057"
]
| 0.6901967 | 0 |
Checks if this intersatellite link could transmit data (state-independent). | def couldTransmit(self, data, receiver, txLocation, rxLocation, context):
return super(InterSatelliteLink, self).couldTransmit(data, receiver) \
and txLocation.isOrbit() \
and rxLocation.isOrbit() \
and (abs(txLocation.sector - rxLocation.sector) <= 1
or abs(txLocation.sector - rxLocation.sector)
>= context.getNumSectors() - 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def couldReceive(self, data, transmitter, txLocation, rxLocation, context):\n return super(InterSatelliteLink, self).couldReceive(data, transmitter) \\\n and txLocation.isOrbit() \\\n and rxLocation.isOrbit() \\\n and (abs(txLocation.sector - rxLocation.sector) <= 1\n or abs(txLocation.sector - rxLocation.sector) \n >= context.getNumSectors() - 1)",
"def canSend(self):\n return self._lte.isconnected()",
"def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )",
"def assumed_state(self):\n if self.tahoma_device.type.startswith(\"rts\"):\n return True\n\n return False",
"def is_tx(self):\n return self._pin_name in TX_CHANNELS",
"def available(self) -> bool:\n\n if CORE_STATUS_STATE in self.tahoma_device.active_states:\n return bool(\n self.tahoma_device.active_states.get(CORE_STATUS_STATE) == \"available\"\n )\n\n if CORE_SENSOR_DEFECT_STATE in self.tahoma_device.active_states:\n return (\n self.tahoma_device.active_states.get(CORE_SENSOR_DEFECT_STATE) != \"dead\"\n )\n\n # A RTS power socket doesn't have a feedback channel,\n # so we must assume the socket is available.\n return True",
"def can_send_eth(ir):\n return isinstance(ir, Call) and ir.can_send_eth()",
"def is_not_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() != get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False",
"def data_available(self):\n return (self.status & 0x08) != 0",
"def available(self) -> bool:\n return super().available and bool(self.data)",
"def would_retransmit(self):\n return not self.my_pending_requests.is_empty()",
"def available(self) -> bool:\n return bool(self._api.surveillance_station)",
"def DataAvailable(self) -> bool:",
"def is_setup(self):\n return self._market_data_sock_info.ready.is_set() and \\\n self._orders_sock_info.ready.is_set()",
"def available(self) -> bool:\n if self._avm_wrapper.devices[self._mac].wan_access is None:\n return False\n return super().available",
"def Check_Communications(self):\n self.serial_status = False\n try:\n self.serial_status = self.ser.isOpen()\n except Exception as e:\n print \"No communication to stage serial bus. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.serial_status = False\n self.encoder_status = False\n try:\n self.encoder_status = True\n for i in range(3):\n value = self.fd_channel[i].read(3)+b'\\x00' \n # read the 24 bit register (3 bytes) and add a fourth byte \n # to make it an integer.\n signed_value = struct.unpack(\"=I\", value)[0] \n if signed_value < 0 or signed_value > 2**24:\n self.encoder_status = False\n break\n except Exception as e:\n print \"No communication to optical encoders. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.encoder_status = False\n self.comm_status = self.serial_status and self.encoder_status\n return",
"def IsWirelessUp(self):\n return self.wifi.IsUp()",
"def has_pending_packets_to_be_sent(self):\n return self.num_packets != 0",
"def isTransmitted(self) -> bool:\r\n\r\n return self.__is_transmitted",
"def available(self) -> bool:\n return len(self._state) > 0",
"def available(self):\n return self._state is not None",
"def available(self):\n return self._state is not None",
"def available(self):\n return (\n self._connector.station_id is not None\n and self._connector.latest_update is not None\n )",
"def is_outgoing(self, pkt):\n try:\n return pkt[Ether].src.lower() == get_if_hwaddr(conf.iface).lower()\n except IndexError:\n return False",
"def is_rx(self):\n return not self.is_tx",
"def is_usable(self) -> bool:\n return self.is_unlimited or self.__times > ActionState.NEUTRAL",
"def wifi_connectivity_verify(self):\n self.sendline(\"iw %s link\" % self.iface_wifi)\n matched = self.expect([\"Connected\", \"Not connected\", pexpect.TIMEOUT])\n if matched == 0:\n return True\n else:\n return False",
"def is_incall_connected(self) -> bool:",
"def can_auralise(self):\n\n if not self.sources:\n raise ValueError('No sources available')\n\n if not self.receivers:\n raise ValueError('No receivers available')\n\n if not self.atmosphere:\n raise ValueError('No atmosphere available.')\n\n if not self.geometry:\n raise ValueError('No geometry available.')\n\n return True",
"def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False"
]
| [
"0.6808816",
"0.66868186",
"0.64477277",
"0.64418393",
"0.62366825",
"0.61757886",
"0.6150189",
"0.6097931",
"0.60884607",
"0.60488987",
"0.6041025",
"0.60021377",
"0.5998871",
"0.5975922",
"0.5969649",
"0.5958305",
"0.59526134",
"0.59449387",
"0.5935071",
"0.592336",
"0.59161896",
"0.59161896",
"0.58576113",
"0.58565974",
"0.5841801",
"0.58383286",
"0.58131564",
"0.57808876",
"0.5766208",
"0.5760751"
]
| 0.76803 | 0 |
Checks if this intersatellite link could receive data (state-independent). | def couldReceive(self, data, transmitter, txLocation, rxLocation, context):
return super(InterSatelliteLink, self).couldReceive(data, transmitter) \
and txLocation.isOrbit() \
and rxLocation.isOrbit() \
and (abs(txLocation.sector - rxLocation.sector) <= 1
or abs(txLocation.sector - rxLocation.sector)
>= context.getNumSectors() - 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DataAvailable(self) -> bool:",
"def data_available(self):\n return (self.status & 0x08) != 0",
"def couldTransmit(self, data, receiver, txLocation, rxLocation, context):\n return super(InterSatelliteLink, self).couldTransmit(data, receiver) \\\n and txLocation.isOrbit() \\\n and rxLocation.isOrbit() \\\n and (abs(txLocation.sector - rxLocation.sector) <= 1\n or abs(txLocation.sector - rxLocation.sector)\n >= context.getNumSectors() - 1)",
"def is_incall_connected(self) -> bool:",
"def available(self) -> bool:\n return super().available and bool(self.data)",
"def available(self) -> bool:\n\n if CORE_STATUS_STATE in self.tahoma_device.active_states:\n return bool(\n self.tahoma_device.active_states.get(CORE_STATUS_STATE) == \"available\"\n )\n\n if CORE_SENSOR_DEFECT_STATE in self.tahoma_device.active_states:\n return (\n self.tahoma_device.active_states.get(CORE_SENSOR_DEFECT_STATE) != \"dead\"\n )\n\n # A RTS power socket doesn't have a feedback channel,\n # so we must assume the socket is available.\n return True",
"def check_for_incoming_info(self):\n\n if self.test_message_response:\n self.parse_incoming_message(self.test_message_response)\n return True\n\n POLL_ONLY_TIMEOUT_VALUE = 0\n got_at_least_one = False\n while (True):\n readables, writables, errors = select.select([self.socket_datastream], [], [], POLL_ONLY_TIMEOUT_VALUE)\n if not self.socket_datastream in readables:\n return got_at_least_one\n got_at_least_one = True\n data, remote_ip_port = self.socket_datastream.recvfrom(MAX_EXPECTED_MSG_SIZE)\n if remote_ip_port != self.ip_port_arduino_datastream:\n errorhandler.loginfo(\"Msg from unexpected source {}\".format(remote_ip_port))\n else:\n errorhandler.logdebug(\"msg received:{}\".format(data.hex()))\n self.parse_incoming_message(data)",
"def available(self):\n return self._state is not None",
"def available(self):\n return self._state is not None",
"def available(self):\n from pyhs3 import STATE_LISTENING\n return self._connection.api.state == STATE_LISTENING",
"def available(self) -> bool:\n return len(self._state) > 0",
"def has_data(self):\n if len(self.channels) > 0:\n return True\n return False",
"def is_incall_dialing(self) -> bool:",
"def check_readings(self):\n # loading data from log file\n if self.filepath is not None:\n if self.all_read is None:\n return False\n else:\n ei = self.curr_indexi + self.read_step\n if ei >= self.all_read.shape[0]:\n return False\n self.curr_read = self.all_read[self.curr_index: ei, :]\n self.curr_index = ei\n return True\n\n # stream of data from beaglebone\n # check that there is new data avalible\n isnew = not all_data.empty()\n\n if isnew:\n # read most current data\n qsize = all_data.qsize()\n curr_read = [all_data.get_nowait() for _ in range(qsize)]\n self.curr_read = np.concatenate(curr_read)\n\n return isnew",
"def has_data(self):\n return len(self.data) > 0",
"def is_dialing(self) -> bool:",
"def check_availability(self):\n\t\tif not self.connection_is_usable:\n\t\t\treturn False\n\t\twith self.client_lock:\n\t\t\tif self.stream is None:\n\t\t\t\treturn False\n\t\t\tif self.last_ping is None or self.last_ping.age() >= self.ping_max_age:\n\t\t\t\tself.last_ping = SendPing(self, self.ping_timeout)\n\t\t\tlast_ping = self.last_ping\n\t\treturn last_ping.answered(self.ping_timeout)",
"def is_connected(self) -> bool:\n try:\n # When MSG_PEEK is used the data is treated as unread\n # and the next recv shall still return this data\n data = self.socket.recv(self.BUFFER_SIZE, socket.MSG_PEEK)\n if len(data) == 0:\n return False\n return True\n except ConnectionResetError:\n return False",
"def canread(self):\n return False",
"def available(self):\n return (\n self._connector.station_id is not None\n and self._connector.latest_update is not None\n )",
"def __bool__(self) -> bool:\n return not self._disconnected",
"def assumed_state(self):\n if self.tahoma_device.type.startswith(\"rts\"):\n return True\n\n return False",
"def available(self) -> bool:\n return self._ctrl.connected()",
"def get_is_data_available(self):\n return self._data_available",
"def data_ready(self):\n data_ready = len(self.barcode) > 0\n data_ready &= self.price > 0\n data_ready &= len(self.description) > 0\n return data_ready",
"def has_data(self) -> bool:\n return bool(self.data)",
"def available(self) -> bool:\n if self._avm_wrapper.devices[self._mac].wan_access is None:\n return False\n return super().available",
"def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None",
"def is_setup(self):\n return self._market_data_sock_info.ready.is_set() and \\\n self._orders_sock_info.ready.is_set()",
"def has_data(self):\n return self._data is not None"
]
| [
"0.6806446",
"0.666152",
"0.6657909",
"0.6534576",
"0.65277207",
"0.65100396",
"0.6355405",
"0.63156337",
"0.63156337",
"0.6288527",
"0.6234863",
"0.62215567",
"0.61767733",
"0.61651605",
"0.61578643",
"0.6138238",
"0.60690147",
"0.6063522",
"0.60616744",
"0.60542154",
"0.60350037",
"0.6028451",
"0.60237044",
"0.6017338",
"0.60156476",
"0.6013961",
"0.6013796",
"0.60102254",
"0.60087246",
"0.5991833"
]
| 0.71264654 | 0 |
The basic idea here is to represent the file contents as a long string and iterate through it character-by-character (the 'ind' variable points to the current character). Whenever we get to a new tree, we call the function again (recursively) to read it in. | def readTree(text, ind, verbose=False):
if verbose:
print("Reading new subtree", text[ind:][:10])
# consume any spaces before the tree
while text[ind].isspace():
ind += 1
if text[ind] == "(":
if verbose:
print("Found open paren")
tree = []
ind += 1
# record the label after the paren
label = ""
while not text[ind].isspace() and text != "(":
label += text[ind]
ind += 1
tree.append(label)
if verbose:
print("Read in label:", label)
# read in all subtrees until right paren
subtree = True
while subtree:
# if this call finds only the right paren it'll return False
subtree, ind = readTree(text, ind, verbose=verbose)
if subtree:
tree.append(subtree)
# consume the right paren itself
ind += 1
assert(text[ind] == ")")
ind += 1
if verbose:
print("End of tree", tree)
return tree, ind
elif text[ind] == ")":
# there is no subtree here; this is the end paren of the parent tree
# which we should not consume
ind -= 1
return False, ind
else:
# the subtree is just a terminal (a word)
word = ""
while not text[ind].isspace() and text[ind] != ")":
word += text[ind]
ind += 1
if verbose:
print("Read in word:", word)
return word, ind | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass",
"def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')",
"def read_file_into_tree(file_name, tree):\n if not isinstance(file_name, str):\n raise ValueError(\"'str required here. Created by @Edd1e234'\")\n\n try:\n file = open(file_name, \"r\")\n except FileNotFoundError:\n print(\"File not found\")\n raise FileNotFoundError(file_name + \"not found...\")\n\n count = 0\n\n print(\"Inserting\")\n\n for line in file:\n words = line.split(\" \")\n if words[0].isalpha():\n # Checking if there actual float numbers.\n vector_list = []\n for i in words[1:]:\n try:\n vector_list.append(float(i))\n except ValueError:\n # Raises Exception\n raise ValueError(\"This file does not contain correct data. Does not contain float numbers. '\", i,\n \"' Created by @Edd1e234\")\n # Creates the object wrapper class.\n key = ObjectKey(words[0], vector_list)\n tree.insert(key)\n count += 1\n print(count)\n file.close()",
"def parse_tree_file(filename):\n f = open(filename)\n data = f.read()\n f.close()\n return parser.parse(data, lexer=lexer)",
"def deserialize(stringTree):\n global i #Global shared index \n\n if (stringTree[i] == \"#\"): #Empty node\n return None \n\n else:\n node = Node(stringTree[i]) #Create node of current index\n i+=1\n node.left = deserialize(stringTree) #Check left child\n i+=1\n node.right = deserialize(stringTree) #Check right child\n return node",
"def parse_file():\r\n # Open the text file as read only\r\n file = open(\"formulas.txt\", \"r\")\r\n\r\n # Iterate through each line in the file\r\n for formula in file:\r\n # Create a new tree based on the formula\r\n tree = parse_formula(formula.rstrip())\r\n # Formatting\r\n print(\"Formula: {}\".format(formula.rstrip()))\r\n print(\"Tree:\")\r\n tree.display()\r\n print(\"-----------------------------\")",
"def getNodeTreeFromStr(string):\n document = publish_doctree(string)\n\n return list([node for node in document.traverse()[1:] if node.parent == document]) # we've to return the chidren of the document, as returning the document itself, seems to duplicate the content of the current file being processed.",
"def REPL_file(\n input,\n parse_tree_fn: Callable,\n output_style_fn: Callable,\n session: bool,\n mode: str,\n show_tree_fn=None,\n debug=False,\n) -> None:\n in_count = 0\n for lineno, line in enumerate(open(input, \"r\").readlines()):\n line = line.strip()\n print(\"%3d: %s\" % (lineno + 1, line))\n if not line or line.startswith(\"(*\"):\n continue\n in_count += 1\n eval_one(\n in_str=line,\n parse_tree_fn=parse_tree_fn,\n output_style_fn=output_style_fn,\n mode=mode,\n session=session,\n show_tree_fn=show_tree_fn,\n debug=debug,\n )\n pass\n return",
"def parse(self, file):\n # The root tree\n tree = Tree()\n # Dictionary of subtrees that are created\n # The key is the name and the value is the corresponding TreeElement\n subtrees = dict()\n\n current_subtree = tree\n current_tree_element = None\n next_is_start = False\n next_is_comment = False\n comment = False\n last_indent = 0\n lnr = 0\n with open(file, 'r') as bfile:\n for line in bfile:\n lnr += 1\n comment = next_is_comment\n\n line = re.sub(r'//\\*\\*.*?\\*\\*//', '', line) # Block comments starting and ending in the same line\n\n if '**//' in line:\n # Block comments ending in this line\n # This line as well as the following will contain valid code\n next_is_comment = False\n comment = False\n line = re.sub(r'.*\\*\\*//', '', line)\n if '//**' in line:\n # Block comments starting in this line\n # This line may contain valid code, the next ones won't\n next_is_comment = True\n line = re.sub(r'//\\*\\*.*', '', line)\n\n line = re.sub(r'//.*', '', line) # Line comments\n\n line = line.rstrip()\n if not line:\n continue\n\n if not comment:\n indent = len(line) - len(line.lstrip())\n if indent % 4 != 0:\n raise ParseError('Error parsing line {}: Indent is not a multiple of 4'.format(lnr))\n\n line_content = line.lstrip()\n\n if indent == 0 and line_content.startswith('-->'):\n # This is the declaration of the start. Next line contains root element\n next_is_start = True\n current_subtree = tree\n last_indent = indent\n continue\n\n if next_is_start:\n # This line contains the root element of the main tree\n next_is_start = False\n element = self.create_tree_element(line_content, current_tree_element)\n tree.set_root_element(element)\n current_tree_element = element\n\n if indent == 0 and line_content.startswith('#'):\n # This is the declaration of a new subtree\n current_subtree = Tree()\n subtrees[line_content[1:]] = current_subtree\n current_tree_element = None\n last_indent = indent\n continue\n\n if indent < last_indent:\n # Go layers up, depending on indent difference\n for _ in range(indent, last_indent, 4):\n current_tree_element = current_tree_element.parent\n\n if re.search(r'\\s*-?->\\s*', line_content):\n # Arrow in line, split in decision result and call\n result, call = re.split(r'\\s*-?->\\s*', line_content, 1)\n\n if call.startswith('#'):\n # A subtree is called here.\n subtree_name = call.strip('#')\n if subtree_name not in subtrees:\n raise AssertionError('Error parsing line {}: {} not defined'.format(lnr, call))\n # The root element of the subtree should be placed in this tree position\n if current_tree_element is None:\n # The current subtree is empty, set the subtree as its root element\n current_subtree.set_root_element(subtrees[subtree_name].root_element)\n else:\n # Append this subtree in the current position\n current_tree_element.add_child_element(copy.copy(subtrees[subtree_name].root_element), result)\n\n elif re.search(r'\\s*,\\s*', call):\n # A sequence element\n actions = re.split(r'\\s*,\\s*', call)\n element = self.create_sequence_element(actions, current_tree_element)\n current_tree_element.add_child_element(element, result)\n\n elif call.startswith('@'):\n # An action is called\n element = self.create_tree_element(call, current_tree_element)\n current_tree_element.add_child_element(element, result)\n\n elif call.startswith('$'):\n # A decision is called\n element = self.create_tree_element(call, current_tree_element)\n current_tree_element.add_child_element(element, result)\n current_tree_element = element\n\n else:\n raise ParseError('Error parsing 
line {}: Element {} is neither an action nor a decision'.format(lnr, call))\n\n else:\n # No arrow, must be the beginning of a new subtree\n element = self.create_tree_element(line_content, current_tree_element)\n current_subtree.set_root_element(element)\n current_tree_element = element\n\n last_indent = indent\n return tree",
"def traverse(tree):\n nonlocal result\n\n symbol, children, *_ = tree\n\n if children:\n for c in children:\n if c[0].startswith(\"<\"):\n if not c[0].startswith(symbol_name[:-1]):\n if next_leaf(c):\n result += c[0].replace(\"<\", \"\").replace(\">\", \": \") + next_leaf_content(c) + \"\\n\"\n else:\n result += c[0].replace(\"<\", \"\").replace(\">\", \"\") + \" {\" + \"\\n\"\n traverse(c)\n result += \"}\" + \"\\n\"\n else:\n traverse(c) # do not update anything, just traverse",
"def file_reader(file_dir,\n word2id_dict,\n label2id_dict,\n word_replace_dict,\n filename_feature=\"\"):\n word_dict_len = max(map(int, word2id_dict.values())) + 1\n label_dict_len = max(map(int, label2id_dict.values())) + 1\n\n def reader():\n \"\"\"\n the data generator\n \"\"\"\n index = 0\n for root, dirs, files in os.walk(file_dir):\n for filename in files:\n for line in io.open(\n os.path.join(root, filename), 'r', encoding='utf8'):\n index += 1\n bad_line = False\n line = line.strip(\"\\n\")\n if len(line) == 0:\n continue\n seg_tag = line.rfind(\"\\t\")\n word_part = line[0:seg_tag].strip().split(' ')\n label_part = line[seg_tag + 1:]\n word_idx = []\n words = word_part\n for word in words:\n if word in word_replace_dict:\n word = word_replace_dict[word]\n if word in word2id_dict:\n word_idx.append(int(word2id_dict[word]))\n else:\n word_idx.append(int(word2id_dict[\"<UNK>\"]))\n target_idx = []\n labels = label_part.strip().split(\" \")\n for label in labels:\n if label in label2id_dict:\n target_idx.append(int(label2id_dict[label]))\n else:\n target_idx.append(int(label2id_dict[\"O\"]))\n if len(word_idx) != len(target_idx):\n print(line)\n continue\n yield word_idx, target_idx\n\n return reader",
"def make_tree_from_file_content(file_content):\r\n file_tokens = make_file_tokens(file_content)\r\n tree = movetree.MoveTree()\r\n root = movetree.Node()\r\n root.depth = -1\r\n tip = root\r\n branch_point_stack = []\r\n for token in file_tokens:\r\n if token == '(':\r\n branch_point_stack.append(tip)\r\n elif token == ')':\r\n tip = branch_point_stack.pop()\r\n else:\r\n new_move = move_from_token(token)\r\n tip.add_child(new_move)\r\n tip = new_move\r\n tree.info = make_info_node(root.children[0].properties)\r\n first_move = root.children[0].children[0]\r\n first_move.parent = tree.root_node\r\n tree.root_node.children.append(first_move)\r\n tree.current_move = tree.root_node\r\n return tree",
"def buildTrieSingle(self, file):\n pass",
"def treefile(filename):\n nobv.visual_treefile(filename)",
"def load_gcrf_tree_file(fname):\n with codecs.open(fname, encoding='utf-8') as f:\n ct = _load_gcrf_tree_file(f)\n return ct",
"def read(path):",
"def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numnod = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.nodes = []\n\n for _ in range(self.numnod):\n node = FRDNode()\n self.nodes.append(node)\n if self.format < 2:\n in_file.read(1)\n node.key = int(in_file.read(2))\n node.number = int(in_file.read(5*(self.format+1)))\n node.pos = [float(in_file.read(12)) for j in range(3)]\n in_file.read(1) # eol\n else:\n node.number = struct.unpack('i', in_file.read(4))[0]\n if self.format == 2:\n node.pos = struct.unpack('fff', in_file.read(12))\n else:\n node.pos = struct.unpack('ddd', in_file.read(24))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only",
"def read_in_file(self, file):\n with open(self.file) as doc:\n trie = dict()\n for line in doc:\n line = line.split(' ')[0]\n self.make_trie(trie, line.rstrip())\n return trie",
"def trees_from_file(this_class, filename):\n # see trees_from_string for an explanation\n trees = list(parser.inputTreesFromFile(filename))\n for tree in trees:\n tree.this.acquire()\n return list(map(this_class, trees))",
"def read_filter_tree(self,filename):\n root = Node([],0.0)\n curr_node = root\n good_nodes = []\n with open(filename,\"r\") as inp:\n line = inp.readline()\n while line != \"root\\n\":\n if line.isspace():\n line = inp.readline()\n if line[-1] == \"\\n\":\n line = line[:-1]\n filters = [int(filt) for filt in re.split(',+',line)]\n line = inp.readline()\n value = float(line[:-1])\n line = inp.readline()\n complete = bool(line[:-1])\n node = Node(filters,value)\n node.complete = complete\n good_nodes.append(node)\n line = inp.readline()\n\n line = inp.readline()\n while line != '':\n if line.isspace():\n line = inp.readline()\n if line[-1] == \"\\n\":\n line = line[:-1]\n filters = [int(filt) for filt in re.split(',+',line)]\n line = inp.readline()\n value = float(line[:-1])\n line = inp.readline()\n complete = bool(line[:-1])\n node = Node(filters,value)\n node.complete = complete\n if curr_node.is_child(node):\n curr_node.children.append(node)\n parent = curr_node\n curr_node = node\n curr_node.parent = parent\n elif curr_node.is_sibling(node):\n parent.children.append(node)\n curr_node = node\n curr_node.parent = parent\n else:\n while parent.is_child(node) is False:\n if parent == root:\n raise KeyError(\"No parent found for the node: \\n\"+str(node.filters)+\"\\n\")\n parent = parent.parent\n\n parent.children.append(node)\n curr_node = node\n curr_node.parent = parent\n\n line = inp.readline()\n return",
"def _mutate_file(self, node, visited = set([])):\n for ch in self._get_children(node):\n\n if ch not in visited:\n visited.add(ch)\n\n try:\n self._mutate_node(ch)\n except Exception as e:\n print(e)\n\n # Recursion is a bitch\n self._mutate_file(ch, visited)",
"def process(path):\n def extract(lines):\n \"\"\"Given a iterable of lines, extracts the useful information from it.\"\"\"\n prefix = 'Note: including file: '\n for line in lines:\n if line.startswith(prefix):\n line = os.path.normpath(line[len(prefix):])\n # Determine the depth by counting the number of spaces starting the line.\n depth = len(line) - len(line.lstrip()) + 1\n yield (depth, line.strip())\n\n with open(path) as fr:\n lines = iter(fr)\n root = next(lines)\n yield (None, root.strip())\n for depth, path in extract(fr):\n yield (depth, path)",
"def seperate_file(file):\n firstHalf = file.split(\"\\\\\"[-1])\n #print \"This is the node\", firstHalf[-2]\n node = firstHalf[-2]\n print \"\\nReading results for \", node\n return node",
"def loadfile(self,fd):\n pat=re.compile(r'!')\n f=self.files.index(fd)\n index=0\n newstack=0\n fnc={}\n inc={}\n thisline=[]\n for line in fd:\n line=line.strip()\n if pat.search(line):\n if newstack>0 and index>1:\n count=int(thisline[index-1])\n for i in range(index-1):\n fn=thisline[i]\n fn=re.sub('^.*(: |`)','',fn)\n fn=re.sub('\\/.*$','',fn)\n inc[fn]=inc.get(fn,0)+1\n fn=re.sub('\\+.*$','',fn)\n fnc[fn]=fnc.get(fn,0)+1\n if i==0:\n self.excl[f][fn]=self.excl[f].get(fn,0)+count\n else:\n fn=fn+\"+\"+prefunc\n prefunc=fn\n self.total[f]+=count\n for i in fnc:\n self.incl[f][i]=self.incl[f].get(i,0)+count*fnc[i]\n for i in inc:\n self.inst[f][i]=self.inst[f].get(i,0)+count*inc[i]\n self.caller_callee[f][fn]=self.caller_callee[f].get(fn,0)+count\n fnc.clear()\n inc.clear()\n del thisline[:]\n index=0\n\n newstack+=1\n continue\n\n if newstack>0:\n thisline += [line]\n index+=1",
"def lex(ged_file):\n root = Record(None, None, None)\n curr_for = [root]\n for line in ged_file:\n fields = line.split()\n if not fields:\n continue\n level = int(fields[0])\n if fields[1].startswith(\"@\"):\n # Ex: 0 @I6@ INDI\n rec_id = fields[1] # Ex: @I138@ or @F31@\n rec_type = fields[2] # Ex: INDI or FAM\n data = \" \".join(fields[3:])\n else:\n # Ex: 2 DATE 13 Dec 1985\n rec_id = None\n rec_type = fields[1] # Ex: NAME, DATE, PLAC, CHIL, ...\n data = \" \".join(fields[2:])\n this_rec = Record(rec_id, rec_type, data)\n\n # Find which record this is a sub-record of.\n parent = curr_for[int(level)]\n # Add as sub-record.\n parent.sub_recs.append(this_rec)\n # Update curr_for so that future records can be nested beneath this one.\n curr_for[level + 1:] = [this_rec]\n\n return root.sub_recs",
"def read_input(file_input):\n login = file_input.readline().strip()\n n, max_batch_size = map(int, file_input.readline().split())\n\n tree = Tree(n)\n for _ in range(n - 1):\n path = file_input.readline().strip()\n\n child_type = RecordType.Record\n if path[-1] == '/':\n child_type = RecordType.Folder\n path = path[:-1]\n\n path_tokens = path.split('/')\n parent_path, child_name = '/'.join(path_tokens[:-1]), path_tokens[-1]\n if parent_path == '':\n parent_path = '/'\n\n tree.add_edge(parent_path, child_name, child_type)\n tree.calculate_sizes()\n\n return login, max_batch_size, tree",
"def get_characters(root):\n def glob_defs(filenames):\n return [i for i in filenames if i[-3:].lower() == \"def\"]\n\n # glob the characters and group by subdirectory\n def glob_chars(path):\n pushback = []\n\n paths = [join(path, i) for i in os.listdir(path)]\n dirnames = [i for i in paths if os.path.isdir(i)]\n dirnames.reverse()\n\n while dirnames:\n path = dirnames.pop()\n defs = glob_defs(os.listdir(path))\n paths = [join(path, i) for i in os.listdir(path)]\n subdirs = [i for i in paths if os.path.isdir(i)]\n\n if subdirs:\n pushback.append(path)\n\n while defs:\n yield join(path, defs.pop())\n\n while pushback:\n path = pushback.pop()\n for char in glob_chars(path):\n yield char\n\n for path in glob_chars(root):\n d = parse_def(path)\n name = fix_name(os.path.split(dirname(path))[1])\n if not verify_name_matches_def(name, path):\n name = None\n try:\n yield Character(d['name'], name, path)\n except KeyError:\n pass",
"def parse(self, filepath, top=None):\n\n\t\twith open(filepath, 'r') as f:\n\t\t\tcontent = f.read()\n\t\t\t# Starting index of the character content.\n\t\t\tstart_index = content.find('</h1>') + 5\n\t\t\t# Ending index of the character content.\n\t\t\tend_index = content.rfind('<h1>')\n\t\t\t# Character content string.\n\t\t\tcharacter_content = content[start_index:end_index]\n\n\t\t\t# List of characters, with each character is represented as a dictionary\n\t\t\t# with character name (The first one listed), character count, and a\n\t\t\t# list of aliases, with each alias represented as a dictionary with\n\t\t\t# alias span and count.\n\t\t\tcharacters = []\n\n\t\t\ti = 0\n\t\t\t# Character aliases are listed on separate lines.\n\t\t\tfor l in character_content.split('<br />'):\n\t\t\t\t# Only retain the top characters.\n\t\t\t\tif l.strip() != \"\" and (top is None or i < top):\n\t\t\t\t\tcharacter = {}\n\n\t\t\t\t\t# The character count is the number starting the line.\n\t\t\t\t\tcharacter_count = re.search(r'^\\d+', l)\n\t\t\t\t\tif character_count:\n\t\t\t\t\t\tcount = int(character_count.group())\n\t\t\t\t\t\tif count == 0:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcharacter['count'] = count\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Reached the text portion.\n\t\t\t\t\t\tif l.startswith(\"<h1>\"):\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tException(\"Failed to parse character count in \" +\n\t\t\t\t\t\t\t\tfilepath + \" on line: \" + l)\n\n\t\t\t\t\t# Drop the starting character count.\n\t\t\t\t\tparsed_l = l[character_count.end():].strip()\n\n\t\t\t\t\t# The first name is taken as the sequence of characters after \n\t\t\t\t\t# the character count leading up to the first '(' (without any\n\t\t\t\t\t# leading or trailing whitespace), i.e. the first alias span.\n\t\t\t\t\tfirst_name = re.search(r'^([^\\(]+)\\s+\\(', parsed_l)\n\t\t\t\t\tif first_name:\n\t\t\t\t\t\tcharacter['entity'] = first_name.group(1)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"Failed to parse first character name \"\n\t\t\t\t\t\t\t\"in \" + filepath + \" on line: \" + l)\n\n\t\t\t\t\talias_matches = list(re.finditer(r'([^(\\(\\))]+)\\s+\\((\\d+)\\)',\n\t\t\t\t\t\tparsed_l))\n\t\t\t\t\tif len(alias_matches) > 0:\n\t\t\t\t\t\tcharacter['aliases'] = [{\n\t\t\t\t\t\t\t\t\t\t\t\t\t'alias': m.group(1).strip(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t'count': int(m.group(2))\n\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\tfor m\n\t\t\t\t\t\t\t\t\t\t\t\tin alias_matches]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"Failed to parse aliases in \" +\n\t\t\t\t\t\t\tfilepath + \" on line: \" + l)\n\n\t\t\t\t\tcharacters.append(character)\n\t\t\t\t\ti += 1\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\t\treturn characters",
"def __parse(self):\n # raw/objects: detect name, type, use major tag for type as parent node\n # raw/graphics: as object raw, but add TILE_PAGE\n # init: usually flat file, except\n # embark_profiles.txt: [PROFILE] is parent\n # interface.txt: [BIND] is parent (legacy will be flat)\n # world_gen.txt: [WORLD_GEN] is parent\n # Non-raw files (unsupported): init/arena.txt, subdirs of raw/objects\n parse_raw(self, self.read(self.filename))",
"def compile_file(self, filename):\n if self.basepath is None:\n self.basepath = os.path.split(filename)\n\n i = 1\n txt = \"\"\n with open(filename, \"r\") as reader:\n for line in reader:\n if line != \"\\n\":\n txt += line\n debug(logger, \"*** [%d] %s\" % (i, line))\n if balanced(txt) == 0:\n print(self.parseit(txt))\n txt = \"\"\n i = i + 1\n\n if len(txt):\n print(\"Error: missing ()'s, %s\" % txt)"
]
| [
"0.57345337",
"0.5500208",
"0.54999906",
"0.54994",
"0.54745203",
"0.53851414",
"0.5351733",
"0.5224818",
"0.5221484",
"0.5204018",
"0.5189784",
"0.51858944",
"0.51852345",
"0.5182749",
"0.51462406",
"0.51426816",
"0.5098841",
"0.50946444",
"0.50844306",
"0.50804096",
"0.50698864",
"0.5062209",
"0.5051241",
"0.5035627",
"0.50351685",
"0.5026732",
"0.5022375",
"0.5013027",
"0.50061613",
"0.4995158"
]
| 0.6211928 | 0 |
queries the given url and places the params and headers into the request if present. | def query(url, params=None, headers_param=None):
if params is None:
params = {}
logging.info("url={0}\tparams={1}".format(url, params))
headers = {
'Referer': url,
"Content-Type": "text/xml; charset=UTF-8", # implement after checking if this doesn't kill the other scripts
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
if headers_param is not None:
# mergers the headers into one so that the basic headers don't have to duplicated
for k in headers_param.keys():
headers[k] = headers_param[k]
session = requests.session()
result = session.get(
url,
cookies=requests.utils.cookiejar_from_dict(requests.utils.dict_from_cookiejar(session.cookies)),
headers=headers,
params=params
).text
time.sleep(RATE_LIMITING)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query(url):",
"def request(url, headers, params, method=\"GET\"):\n url = UrlManipulation.replace_query_params(url, **params)\n logger.debug(f\"request(): {method} {url}\")\n req = urllib.request.Request(url, headers=headers, method=method)\n try:\n with urllib.request.urlopen(req) as resp:\n code = resp.code\n resp_headers = resp.info()\n data = resp.read()\n except urllib.error.HTTPError as e:\n code = e.code\n resp_headers = e.headers\n data = e.read()\n except urllib.error.URLError as e:\n raise QueryError(f\"URLError: {e.reason}\")\n\n if resp_headers.get(\"Content-Encoding\", None) == \"gzip\":\n logger.debug(\"Decompress response\")\n data = gzip.decompress(data)\n\n return code, resp_headers, data.decode()",
"def _get_request(url, params):\n request = requests.get(url, params=params)\n\n return request",
"def req_get(url, headers=None, params=None) -> Response:\n if params:\n url = \"{}?{}\".format(url, parse.urlencode(params))\n\n req = Request(url, headers=headers, method=\"GET\")\n\n with request.urlopen(req) as res:\n response = Response(res)\n return response",
"def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request",
"def _process_query(self, url, query=None, add_authtok=True):\n if add_authtok:\n if self.authtok == '':\n self._error('No auth token, must login first')\n return False\n if query is None:\n query = {}\n query.update({'auth': self.authtok, 'email': self.email})\n\n if len(query) > 0:\n request = url + '?' + urllib.urlencode(query)\n else:\n request = url\n\n self.api_count += 1\n try:\n fh = urllib2.urlopen(request)\n response = fh.read()\n fh.close()\n except urllib2.HTTPError, e:\n # Received a non 2xx status code\n raise SimplenoteError('http error: {}'.format(e.code))\n except urllib2.URLError, e:\n # Non http error, like network issue\n raise SimplenoteError('url error: {}'.format(e.reason))\n return json.loads(response)",
"def request(query):",
"def preQuery(self):\n self.request_url = self.url\n pass",
"def _request(self, url, params, base_url=None, first_request_time=None, verbose=False, requests_kwargs=None):\n\n if not first_request_time:\n first_request_time = datetime.now()\n\n if base_url is None:\n base_url = self.base_url\n\n elapsed = datetime.now() - first_request_time\n # TODO: to catch timeouts\n # if elapsed > self.retry_timeout:\n # raise TimeOutException()\n\n # create url :: self._generate_query_url(url, params)\n query_url = url\n\n # url encoding of params\n # TODO: use urlencoding here on params\n\n requests_kwargs = requests_kwargs or {}\n final_requests_kwargs = dict(self.requests_kwargs, **requests_kwargs)\n\n # method\n requests_method = self.session.get\n\n try:\n response = requests_method(\n base_url + query_url,\n params=params,\n **final_requests_kwargs)\n\n # temporary, for logging\n if verbose:\n pretty_print_POST(response.request)\n\n except requests.exceptions.Timeout:\n raise TimeOutException()\n except Exception as e:\n raise TransportError(e)\n\n result = self._get_body(response)\n\n return result",
"def get_request(url, params={}):\n if isinstance(params, dict):\n if len(params) > 0:\n url += \"?\" + urllib.parse.urlencode(params)\n else:\n raise TypeError(\"data must be a dict\")\n headers = {}\n github_token = os.environ.get(\"GITHUB_TOKEN\")\n if github_token:\n headers[\"Authorization\"] = f\"Bearer {github_token}\"\n return urllib.request.Request(url, headers=headers)",
"def _perform_http_request(self, url, data=None, headers=(), timeout=7.0):\n if self._is_first_request:\n self._is_first_request = False\n self._handle_first_request()\n\n if data is not None:\n if isinstance(data, dict) or isinstance(data, list):\n data = urlencoder.urlencode(data)\n else:\n raise RouterFetchError(\n 'POST data should be a dict, a list or None!'\n )\n\n try:\n req = requestor.Request(url, data)\n for header, value in headers:\n req.add_header(header, value)\n with contextlib.closing(requestor.urlopen(req, timeout=timeout)) as handle:\n self._is_logged_in = True\n return (\n handle.geturl(),\n handle.info(),\n handle.read().decode('utf-8', 'ignore')\n )\n except Exception as e:\n raise RouterFetchError('Failed making request: %s' % repr(e))",
"def _get_request(self, search_url, params, timeout=3.):\n\n try:\n response = self._SESSION.get(search_url, params=params, timeout=timeout)\n except requests.Timeout as err:\n logger.error(f'Connection timeout: {err}')\n return None\n except requests.ConnectionError as err:\n logger.error(f'Network problem occurred: {err}')\n return None\n\n if not response.ok:\n logger.warning(f'Register code: {params[\"q\"]}; HTTP Error: {response.status_code}')\n return None\n\n return response",
"def _external_request(self, method, url, *args, **kwargs):\n self.last_url = url\n if url in self.responses.keys() and method == 'get':\n return self.responses[url] # return from cache if its there\n\n headers = kwargs.pop('headers', None)\n custom = {'User-Agent': useragent}\n if headers:\n headers.update(custom)\n kwargs['headers'] = headers\n else:\n kwargs['headers'] = custom\n\n response = getattr(requests, method)(url, *args, **kwargs)\n\n if self.verbose:\n print(\"Got Response: %s\" % url)\n\n if response.status_code == 503:\n raise SkipThisService(\"Service returned 503 - Temporarily out of service.\")\n\n if method == 'get':\n self.responses[url] = response # cache for later\n\n self.last_raw_response = response\n return response",
"def _CreateRequest(self, url, data=None):\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\n req = urllib2.Request(url, data=data)\n if self.host_override:\n req.add_header(\"Host\", self.host_override)\n for key, value in self.extra_headers.iteritems():\n req.add_header(key, value)\n return req",
"def _get(self, url, **queryparams):\n url = urljoin(self.base_url, url)\n if len(queryparams):\n url += '?' + urlencode(queryparams)\n try:\n r = self._make_request(**dict(\n method='GET',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()",
"def _perform_request(cls, url='', request_type='GET', params=None):\n\n get = 'GET'\n post = 'POST'\n delete = 'DELETE'\n put = 'PUT'\n\n if params:\n params = json.dumps(params)\n\n if cls.user_name is None or not cls.user_name:\n raise AuthError(\"Missing user name. Please provide a valid user name.\")\n if cls.password is None or not cls.password:\n raise AuthError(\"Missing password. Please provide a valid password.\")\n\n url = cls.end_point + url\n\n if request_type.upper() == get:\n return cls.session.get(url, stream=False)\n elif request_type.upper() == post:\n logging.info('{0} - {1}'.format(request_type, params))\n return cls.session.post(url, data=params, stream=False)\n elif request_type.upper() == delete:\n logging.info('{0} - {1}'.format(request_type, params))\n return cls.session.delete(url, stream=False)\n elif request_type.upper() == put:\n logging.info('{0} - {1}'.format(request_type, params))\n return cls.session.put(url, data=params, stream=False)",
"def get(self, url, query=None):\n # Perform get request with query filter\n if query is not None:\n return self._query(url, 'GET', params=quote(f'query=\"{query}\"'))\n\n # Perform simple get request\n return self._query(url, 'GET')",
"def merge_url(url, params):\n req = PreparedRequest()\n req.prepare_url(url, params)\n return req.url",
"def get(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError",
"def request(self, url, *args, **kwargs):\n raise NotImplementedError",
"def _request(self, url, method=\"GET\", data=None):\n\t\t# TODO: exception handling\n\t\tif self.logged_in:\n\t\t\tcookie_list = [\"{}={}\".format(k, v) for k, v in self.cookies.iteritems()]\n\t\t\theaders = {'Cookie': \"; \".join(cookie_list)}\n\t\telse:\n\t\t\theaders = {}\n\t\tself.connection.request(method, self.base_path + url, data, headers)\n\t\treturn self.connection.getresponse()",
"def _request(self, opts, query, query_key='q'):\n params = opts['params']\n params[query_key] = query\n resp = requests.get(opts['url'], params=params, headers=self._headers)\n if not resp.ok:\n raise Exception(\"Server threw an error for: {}\".format(resp.url))\n return resp.json()",
"def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req",
"def _get(self, url):\n return self._request(url)",
"def http_request(url, query=None, method=None, headers={}, data=None):\n parts = vars(urllib.parse.urlparse(url))\n if query:\n parts['query'] = urllib.parse.urlencode(query)\n\n url = urllib.parse.ParseResult(**parts).geturl()\n r = urllib.request.Request(url=url, method=method, headers=headers,\n data=data)\n with urllib.request.urlopen(r) as resp:\n msg, resp = resp.info(), resp.read()\n\n if msg.get_content_type() == 'application/json':\n resp = json.loads(resp.decode('utf-8'))\n\n return msg, resp",
"def send_request(url, params=None):\n try:\n prepped_request = requests.Request(\n \"GET\", url, params=params, auth=(USERNAME, TOKEN)\n ).prepare()\n r = fetch_url(prepped_request, session=SESSION)\n if isinstance(r, requests.Response):\n return r.json()\n except Exception as e:\n logger.warning(str(e))\n return None",
"def _request(self, url):\n response = requests.get(url, headers=self.header)\n\n if str(response.status_code).startswith('2'):\n return response\n\n raise Exception(\"URI request returned an error. Error Code \" + str(response.status_code))",
"def _request(self, method, *args, **kwargs):\n if not \"headers\" in kwargs:\n kwargs[\"headers\"] = self._headers\n return self._session.request(method, self._url(*args), **kwargs)",
"def _make_request_with_auth_fallback(self, url, headers=None, params=None):\n self.log.debug(\"Request URL and Params: %s, %s\", url, params)\n try:\n resp = requests.get(\n url,\n headers=headers,\n verify=self._ssl_verify,\n params=params,\n timeout=DEFAULT_API_REQUEST_TIMEOUT,\n proxies=self.proxy_config,\n )\n resp.raise_for_status()\n except requests.exceptions.HTTPError as e:\n self.log.debug(\"Error contacting openstack endpoint: %s\", e)\n if resp.status_code == 401:\n self.log.info('Need to reauthenticate before next check')\n\n # Delete the scope, we'll populate a new one on the next run for this instance\n self.delete_current_scope()\n elif resp.status_code == 409:\n raise InstancePowerOffFailure()\n elif resp.status_code == 404:\n raise e\n else:\n raise\n\n return resp.json()",
"def requester(url, params={}, headers={}, verbose=0):\n\n # Try up to 3 times\n retry = requests.Session()\n retry.mount(\"https://\", requests.adapters.HTTPAdapter(max_retries=3))\n raw = retry.get(url=url, params=params, headers=headers)\n if verbose >= 2:\n print(\"Queried: %s\" % raw.url)\n return raw"
]
| [
"0.71451706",
"0.64857244",
"0.6369296",
"0.6307592",
"0.63027126",
"0.61929387",
"0.6177525",
"0.6103905",
"0.60760415",
"0.6020824",
"0.60137224",
"0.597998",
"0.59702086",
"0.5964929",
"0.59598595",
"0.5957351",
"0.5942728",
"0.59328884",
"0.59190345",
"0.59136254",
"0.58653945",
"0.58346856",
"0.5832734",
"0.57992506",
"0.5778621",
"0.57722676",
"0.5764631",
"0.57638407",
"0.5755377",
"0.57526296"
]
| 0.6601044 | 1 |
Make the functions for adding filters and add them to the namespace automatically. | def _make_functions(namespace):
for fil in registry.filters:
func_name = camel2enthought(fil.id)
class_name = fil.id
if func_name.endswith('_filter'):
func_name = func_name[:-7]
class_name = class_name[:-6]
class_name = class_name + 'Factory'
# Don't create any that are already defined.
if class_name in namespace:
continue
# The class to wrap.
klass = new.classobj(class_name,
(_AutomaticFilterFactory,),
{'__doc__': fil.help,}
)
klass._metadata = fil
# The mlab helper function.
func = make_function(klass)
# Inject class/function into the namespace and __all__.
namespace[class_name] = klass
namespace[func_name] = func
__all__.append(func_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function",
"def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})",
"def RegisterCommonFilters(filtermap):\n\n # General casing for output naming\n filtermap['camelcase'] = stringcase.camelcase\n filtermap['capitalcase'] = stringcase.capitalcase\n filtermap['constcase'] = stringcase.constcase\n filtermap['pascalcase'] = stringcase.pascalcase\n filtermap['snakecase'] = stringcase.snakecase\n filtermap['spinalcase'] = stringcase.spinalcase",
"def apply_filters(self, new_filters):\n\t\tself.filters = new_filters",
"def get_filters(self):",
"def filter(self, filters):",
"def register(self, filter_name, filter_func):\n self._filters[filter_name] = filter_func",
"def register_filters(app):\n #\n # Conversion of Markdown to HTML\n Misaka(app, autolink=True, fenced_code=True,\n strikethrough=True, tables=True)\n\n # Registration of handlers for micawber\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(\n value, app.oembed_providers, maxwidth=600, maxheight=400\n )\n\n # Timezone helper\n app.tz = timezone(app.config['TIME_ZONE'])\n\n # Lambda filters for safe image_url's\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')\n\n # Custom filters\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default=\"now!\", until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None: return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None: return ''\n return value.strftime(format)",
"def add_filters_from_module(self, filter_functions):\n\n super(BigqueryInsertFilterMixin, self).add_filters_from_module(filter_functions)\n\n self.bigquery_insert_filter = getattr(\n filter_functions,\n self.bigquery_insert_filter\n )",
"def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order",
"def test_filter_function_settings(self):\n def foo():\n \"\"\"Dummy function.\"\"\"\n return True\n\n self.es.register_filter(foo)\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'], [])\n\n self.es.register_filter(foo, ftype='none')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'][0], foo)\n\n self.es.register_filter(foo, ftype='any')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'][0], foo)\n self.assertEqual(self.es.filter['none'][0], foo)",
"def module(filter_):\n def decorator(module_fn):\n \"\"\"Decorates a module function.\"\"\"\n _FILTERS_AND_SAMPLERS.append((filter_, module_fn))\n return module_fn\n return decorator",
"def load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError",
"def std_filters():\n kwargs = {\n \"sentence_filters\":[punctuation_filter],\n \"word_filters\":[small_word_filter, stopword_filter, stemming_filter]\n }\n return kwargs",
"def add_filter(self, f):\n raise NotImplementedError",
"def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]",
"def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)",
"def decorator(module_fn):\n _FILTERS_AND_SAMPLERS.append((filter_, module_fn))\n return module_fn",
"def request_filter(self, fn):\n self.request_filters.append(fn)\n return fn",
"def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter",
"def filters(self, filters):\n\n self._filters = filters",
"def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)",
"def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version",
"def addAutoSaveFilter(filter):",
"def _import_custom(self, custom_modules):\n for filter_module in custom_modules:\n info('Loading {}'.format(filter_module))\n funs = module_utils.get_all_functions(filter_module)\n for fun_name, fun in funs.items():\n if fun_name.startswith('function'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding function {}'.format(import_name))\n self._functions[import_name] = fun\n elif fun_name.startswith('filter'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding filter {}'.format(import_name))\n self._filters[import_name] = fun",
"def standard_filters():\n classes = []\n filters_dir = __path__[0]\n for dirpath, dirnames, filenames in os.walk(filters_dir):\n relpath = os.path.relpath(dirpath, filters_dir)\n if relpath == '.':\n relpkg = ''\n else:\n relpkg = '.%s' % '.'.join(relpath.split(os.sep))\n for fname in filenames:\n root, ext = os.path.splitext(fname)\n if ext != '.py' or root == '__init__':\n continue\n module_name = \"%s%s.%s\" % (__package__, relpkg, root)\n mod_classes = _get_filter_classes_from_module(module_name)\n classes.extend(mod_classes)\n return classes",
"def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters",
"def _load_filter(self, *args, **kwargs):\n raise NotImplementedError",
"def filter(self, name=None):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n if _name in self._filters:\n raise Error(\"Filter already defined: {0}\".format(_name))\n\n self._filters[_name] = fn\n return fn\n return wrapper",
"def apply_filters(filters, items):\n return scom.apply_filters(filters, items)"
]
| [
"0.6854986",
"0.65905815",
"0.6556149",
"0.6516991",
"0.6503513",
"0.6503041",
"0.649308",
"0.64256465",
"0.6318187",
"0.6257711",
"0.62176806",
"0.6180177",
"0.61334944",
"0.6132359",
"0.6121357",
"0.61159486",
"0.6114769",
"0.609798",
"0.6030215",
"0.6022262",
"0.6016461",
"0.6006878",
"0.59204084",
"0.59122443",
"0.58835626",
"0.58815044",
"0.5868932",
"0.58624756",
"0.5857093",
"0.5818796"
]
| 0.7930712 | 0 |
Sends start cmd to RPKI Validator | def _start_validator(self):
logging.info("Starting RPKI Validator")
utils.run_cmds((f"cd {self.rpki_package_path} && "
f"./{self.rpki_run_name}")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_execution(self):\n self.send_message(\"control.start\",None)",
"async def start_validators(self):\n if self.is_client():\n return\n\n await sleep(random.random() * 3)\n\n cmd = \"/home/martijn/stellar-core/stellar-core run\"\n out_file = open(\"stellar.out\", \"w\")\n self.validator_process = subprocess.Popen(cmd.split(\" \"), stdout=out_file, stderr=out_file)",
"def startRun(self):\r\n #Ask user for verification\r\n usrData = self.getCurrentUserData()\r\n msg = QMessageBox()\r\n msg.setIcon(QMessageBox.Information)\r\n msg.setWindowTitle(\"Run data verification\")\r\n msg.setText((\"You are about to start a run for user %s with ID %s. \" +\r\n \"Please make sure this is correct. You can abort the run at any time \" +\r\n \"by pressing the Escape button.\") % (usrData['Name'], usrData['User_ID']))\r\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\r\n retval = msg.exec_()\r\n\r\n #Only start run if user pressed OK\r\n if retval == QMessageBox.Ok:\r\n self.runController = RunController(parent =self)",
"def start():",
"def start():",
"def start():",
"def start():",
"def TerminalClientStart(self):\n pass",
"def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()",
"def start(self):\n cmd = self.doCommand(self.args)\n if cmd is not None:\n cmd.join()\n else:\n self.out = self.error",
"def startCommand(self):\n commandLine = \"su - %s -c \\\"%s/startservers \\\" \" % (self.runAsUser, self.boHome)\n return self.submitCommand(commandLine)",
"def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []",
"def manual_start(self):\n self.manual_seqnum = 0\n return self.send(\"app_rc_start\")",
"def start(self):\n (self.options, self.arguments) = self.__opt_parser.parse_args()\n\n for opt_name in self.__req_option:\n if not getattr(self.options, opt_name):\n self.__opt_parser.error(\"Required option '%s' not set!\" %\n opt_name)\n\n # Capture warning and critical thresholds if there were any.\n # These can be overridden by API calls.\n\n if hasattr(self.options, 'warning'):\n self.__warning = getattr(self.options, 'warning')\n\n if hasattr(self.options, 'critical'):\n self.__critical = getattr(self.options, 'critical')\n\n self.__started = True\n self.__exit_status = 'OK'",
"def start(self):\n ...",
"def start(self):\n\t\treturn Job(SDK.PrlVm_Start(self.handle)[0])",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)",
"def start_bot(self):\n self.proc = subprocess.Popen(\"./start\", stdin=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t cwd=os.path.abspath(self.path))",
"def start_procedure(self):\n pass",
"def start(self, index='0-'):\n # For FP 35958\n\t# 40168: updated the codenomicon version to 121\t\n codyToolCommand = 'java -Xmx256M -jar /home/test/codenomicon/h323-121.jar '\n codyToolCommand += '--no-gui '\n codyToolCommand += '--index %s ' % index\n codyToolCommand += '--host %s ' % self.serverAddr\n codyToolCommand += '--h323-host %s ' % self.serverAddr\n codyToolCommand += '--local-host %s ' % self.myAddr\n codyToolCommand += '--number 6024441111 ' \n codyToolCommand += '--log-dir %s/cody ' % self.workDir\n print 'Cody command is :%s:' % codyToolCommand\n self.log.debug('Codenomicon: start command: %s' % codyToolCommand)\n self.localShell.sendline(codyToolCommand)\n\n # Made the changes for ticket 35958\n #i = self.localShell.expect_list(self.errorPatterns, 2)\n #if (i != 0):\n # warningMessage = 'h323-121.jar file is not in CLASSPATH. Following steps should be performed: ' + \\\n # 'Correct the CLASSPATH in .bashrc or .nextestrc file'\n # raise EnvironmentError,warningMessage",
"def Start(self) :\n\t\t...",
"def start( self ):\n pathCheck( self.command )\n cout = '/tmp/' + self.name + '.log'\n if self.cdir is not None:\n self.cmd( 'cd ' + self.cdir )\n self.cmd( self.command + ' ' + self.cargs % self.port +\n ' 1>' + cout + ' 2>' + cout + '&' )\n self.execed = False",
"def _start(self):",
"def start(self) -> None:\n ...",
"def start(self) -> None:\n ...",
"def camstart():\n\n\trespond = send_command('camstart')",
"def start(self, suite, args=[]):\n self.suite = suite\n self.output_file = \"%s_Output.xml\" % (self.name)\n temp, suiteName = os.path.split(payload) \n jyLog = open(os.path.join(logFolder, (\"%s_Log.txt\" % self.name)), \"w\") \n jybotCommand = \"pybot -o %s %s\" % (os.path.join(logFolder, self.output_file), self.suite)\n \n print \"Executing : %s ...\" % jybotCommand\n self.running = True\n self.process = subprocess.Popen([\"pybot\", \"-o\", \"%s\" % os.path.join(logFolder, self.output_file), \"%s\" % self.suite], cwd=clientCwd, stdout=jyLog, stderr=jyLog)",
"def start(self, sniffer):\n pass",
"def cmd_start(self, app_name=None):\n rc = self.socket_command_with_project('start', app_name)\n return rc",
"def start(self):\r\n pass"
]
| [
"0.6607596",
"0.6571173",
"0.6326597",
"0.6210207",
"0.6210207",
"0.6210207",
"0.6210207",
"0.6158169",
"0.613642",
"0.6122255",
"0.60959846",
"0.6072968",
"0.60660356",
"0.60612607",
"0.60051334",
"0.5982089",
"0.5948722",
"0.592256",
"0.589133",
"0.58726084",
"0.5835454",
"0.5828774",
"0.582233",
"0.5817564",
"0.5817564",
"0.5801536",
"0.5798517",
"0.57979155",
"0.57735413",
"0.57711935"
]
| 0.74625295 | 0 |
Loads all trust anchors | def load_trust_anchors(self):
utils.write_to_stdout(f"{datetime.now()}: Loading RPKI Validator\n",
logging.root.level)
time.sleep(60)
while self._get_validation_status() is False:
time.sleep(10)
utils.write_to_stdout(".", logging.root.level)
utils.write_to_stdout("\n", logging.root.level)
self._wait(30, "Waiting for upload to bgp preview") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def FullTrustAssemblies(self) -> _n_1_t_2:",
"def UserApplicationTrusts(self) -> ApplicationTrustCollection:",
"def FullTrustAssemblies(self) -> _n_2_t_0[StrongName]:",
"def ApplicationTrustManager(self) -> IApplicationTrustManager:",
"def load_verify_locations(self, cafile: Optional[Any] = ..., dummy: Optional[Any] = ...):\n ...",
"def _load_verify_cafile(self, cafile):\n with open(cafile, \"w\") as fObj:\n fObj.write(root_cert_pem.decode(\"ascii\"))\n\n self._load_verify_locations_test(cafile)",
"def _load_verify_locations_test(self, *args):\n (server, client) = socket_pair()\n\n clientContext = Context(SSLv23_METHOD)\n clientContext.load_verify_locations(*args)\n # Require that the server certificate verify properly or the\n # connection will fail.\n clientContext.set_verify(\n VERIFY_PEER,\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n\n clientSSL = Connection(clientContext, client)\n clientSSL.set_connect_state()\n\n serverContext = Context(SSLv23_METHOD)\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n\n serverSSL = Connection(serverContext, server)\n serverSSL.set_accept_state()\n\n # Without load_verify_locations above, the handshake\n # will fail:\n # Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',\n # 'certificate verify failed')]\n handshake(clientSSL, serverSSL)\n\n cert = clientSSL.get_peer_certificate()\n assert cert.get_subject().CN == \"Testing Root CA\"",
"def test_get_trusts_all(self):\n # Simple function that can be used for cleanup\n def set_scope(auth_provider, scope):\n auth_provider.scope = scope\n\n self.create_trust()\n # Listing trusts can be done by trustor, by trustee, or without\n # any filter if scoped to a project, so we must ensure token scope is\n # project for this test.\n original_scope = self.os_admin.auth_provider.scope\n set_scope(self.os_admin.auth_provider, 'project')\n self.addCleanup(set_scope, self.os_admin.auth_provider, original_scope)\n trusts_get = self.trusts_client.list_trusts()['trusts']\n trusts = [t for t in trusts_get\n if t['id'] == self.trust_id]\n self.assertEqual(1, len(trusts))\n self.validate_trust(trusts[0], summary=True)",
"def test_load_client_ca(self, context, ca_file):\n context.load_client_ca(ca_file)",
"def load_targets(self):\n ldap_services = []\n if self.ldap:\n ldap_services = self.search.get_services(ports=[389], up=True)\n\n self.ldap_strings = [\"ldap://{}\".format(service.address) for service in ldap_services]\n self.services = self.search.get_services(tags=['smb_signing_disabled'])\n self.ips = [str(service.address) for service in self.services]",
"def gen_ca():\n require_root()\n\n config.proxy.gen_ca_certs()\n log.info('OK')",
"def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE",
"def ca():\n return trustme.CA()",
"def __init__(self):\n self.keys = []\n self.hostnames = set()\n self.trust_anchors = []\n self.app_protocols = []",
"def _load_verify_directory_locations_capath(self, capath):\n makedirs(capath)\n # Hash values computed manually with c_rehash to avoid depending on\n # c_rehash in the test suite. One is from OpenSSL 0.9.8, the other\n # from OpenSSL 1.0.0.\n for name in [b\"c7adac82.0\", b\"c3705638.0\"]:\n cafile = join_bytes_or_unicode(capath, name)\n with open(cafile, \"w\") as fObj:\n fObj.write(root_cert_pem.decode(\"ascii\"))\n\n self._load_verify_locations_test(None, capath)",
"def test_set_default_verify_paths(self):\n # Testing this requires a server with a certificate signed by one\n # of the CAs in the platform CA location. Getting one of those\n # costs money. Fortunately (or unfortunately, depending on your\n # perspective), it's easy to think of a public server on the\n # internet which has such a certificate. Connecting to the network\n # in a unit test is bad, but it's the only way I can think of to\n # really test this. -exarkun\n context = Context(SSLv23_METHOD)\n context.set_default_verify_paths()\n context.set_verify(\n VERIFY_PEER,\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n\n client = socket_any_family()\n client.connect((\"encrypted.google.com\", 443))\n clientSSL = Connection(context, client)\n clientSSL.set_connect_state()\n clientSSL.set_tlsext_host_name(b\"encrypted.google.com\")\n clientSSL.do_handshake()\n clientSSL.send(b\"GET / HTTP/1.0\\r\\n\\r\\n\")\n assert clientSSL.recv(1024)",
"def test_CA_upload_from_all_nodes(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(server=self.master, root_ca_names=[self.x509.root_ca_names[0]])\n self.x509.upload_root_certs(server=self.servers[:self.nodes_init][1],\n root_ca_names=[self.x509.root_ca_names[1]])\n self.x509.upload_root_certs(server=self.servers[:self.nodes_init][2],\n root_ca_names=self.x509.root_ca_names[2:])\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n self.auth(servers=self.servers[:self.nodes_init])\n content = self.x509.get_trusted_CAs()\n self.log.info(\"Trusted CAs: {0}\".format(content))\n expected_root_ca_names = self.x509.root_ca_names\n actual_root_ca_names = list()\n for ca_dict in content:\n subject = ca_dict[\"subject\"]\n root_ca_name = subject.split(\"CN=\")[1]\n if \"Couchbase Server\" not in root_ca_name:\n actual_root_ca_names.append(root_ca_name)\n if set(actual_root_ca_names) != set(expected_root_ca_names):\n self.fail(\"Expected {0} Actual {1}\".format(expected_root_ca_names,\n actual_root_ca_names))",
"def load_cert_chain(self, certfile, keyfile: Optional[Any] = ...):\n ...",
"def trusted_server_certificates(self):\n return self._trusted_server_certificates",
"def _load_ssl(self, ssl_options: tuple):\n try:\n self._ssl.load_cert_chain(certfile=ssl_options[0], keyfile=ssl_options[1], password=ssl_options[2])\n except IOError as e:\n self.logger.error(\"Unable to load certificate files: {}\".format(e))\n self.stop()",
"def patch_twisted_ssl_root_bug():\n import twisted.internet._sslverify as mod\n mod.platformTrust = patched_platform_trust",
"def initial_setup():\n\n if os.path.exists(cfg.ca_private_key_path()):\n pkey = _try_load_ca_private_key(cfg.ca_private_key_path())\n else:\n pkey = _generate_ca_private_key(cfg.ca_private_key_path())\n\n if os.path.exists(cfg.ca_cert_path()):\n _try_load_ca_cert(cfg.ca_cert_path())\n else:\n _generate_ca_cert(cfg.ca_cert_path(), pkey)",
"def test_set_one_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(single_ca)",
"def get_ssl_ca_settings():\n ca_data = {}\n https_service_endpoints = config('https-service-endpoints')\n if (https_service_endpoints and\n bool_from_string(https_service_endpoints)):\n # Pass CA cert as client will need it to\n # verify https connections\n ca = get_ca(user=SSH_USER)\n ca_bundle = ca.get_ca_bundle()\n ca_data['https_keystone'] = 'True'\n ca_data['ca_cert'] = b64encode(ca_bundle)\n return ca_data",
"def test_ssl_default(self):\n assert security.security_settings.ssl_verify()",
"def test_set_and_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def mixed_set_add_ca(ctx):\n ctx.set_client_ca_list([cadesc, sedesc])\n ctx.add_client_ca(clcert)\n return [cadesc, sedesc, cldesc]\n\n self._check_client_ca_list(mixed_set_add_ca)",
"def test_fallback_default_verify_paths(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_FILE\",\n _ffi.string(_lib.X509_get_default_cert_file()),\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_DIR\",\n _ffi.string(_lib.X509_get_default_cert_dir()),\n )\n context.set_default_verify_paths()\n store = context.get_cert_store()\n sk_obj = _lib.X509_STORE_get0_objects(store._store)\n assert sk_obj != _ffi.NULL\n num = _lib.sk_X509_OBJECT_num(sk_obj)\n assert num != 0",
"def install_ca():\n require_root()\n\n config.proxy.install_ca_cert()\n log.info('OK')",
"def load_sakai(state):\n raise NotImplementedError",
"def test_add_trusted_project4(self):\n pass"
]
| [
"0.6452937",
"0.6358674",
"0.6343231",
"0.60823315",
"0.60078466",
"0.5811615",
"0.5778859",
"0.5626693",
"0.5504848",
"0.54254216",
"0.54251266",
"0.54206556",
"0.53962106",
"0.53946304",
"0.53521353",
"0.53237087",
"0.5316759",
"0.53055185",
"0.5273949",
"0.5269059",
"0.5164518",
"0.5156356",
"0.51216024",
"0.509217",
"0.5077718",
"0.50579804",
"0.50242823",
"0.50185585",
"0.49886826",
"0.49871355"
]
| 0.72097015 | 0 |
Returns the validity dict for the RPKI Validator to decode results. I could have this as a class attribute, but it's too messy, I think. | def get_validity_dict() -> dict:
return {"VALID": ROA_Validity.VALID.value,
"UNKNOWN": ROA_Validity.UNKNOWN.value,
"INVALID_LENGTH": ROA_Validity.INVALID_BY_LENGTH.value,
"INVALID_ASN": ROA_Validity.INVALID_BY_ORIGIN.value} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_validate(self) -> dict:\n response = self.rc.execute(\"GET\",\n self._get_uri(GET_VALIDATE_URI),\n headers=self.header,\n verify=self.verify)\n return response.json()",
"def getKrustyValidityDictionary (self):\n if not self.isKrustyValid():\n return None\n\n stateFile = self._getKrustyInstallationFilePath()\n return self._readDictionaryFromJson(stateFile)",
"def getVitalValidityDictionary (self):\n if not self.isVitalValid():\n return None\n \n sdVitalValidity = self._getVitalValidityMarker()\n return sdVitalValidity.getData()",
"def create_return_dict_validator(self):\n return {\n 'count': {'type': 'integer', 'required': True, 'empty': False},\n 'rows': {'type': 'list', 'required': True, 'schema': {'type': 'dict'}}\n }",
"def getValidations(self):\n return self.objectValues('InstrumentValidation')",
"def get_validity_data(self) -> dict:\n\n logging.info(\"Getting data from ripe\")\n assert self.total_prefix_origin_pairs < 10000000, \"page size too small\"\n # Then we get the data from the ripe RPKI validator\n # Todo for later, change 10mil to be total count\n return self.make_query(\"bgp/?pageSize=10000000\")",
"def test__format_asn_dict(self, parser):\n for key, value in RPKI_Validator_Wrapper.get_validity_dict().items():\n d = {'asn': 'AS198051', 'prefix': '1.2.0.0/16', 'validity': key}\n assert parser._format_asn_dict(d) == [198051, '1.2.0.0/16', value]",
"def validations(self):\n return self.container['validations']",
"def validator(self) -> Optional[Dict[str, Any]]:\n return self._validator",
"def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. \n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid",
"def get_dict(self):\n # type: () -> dict\n self.is_valid()\n return self._get_dict()",
"def test_kyc_get_validation_legal(self):\n pass",
"def getMainValidityDictionary (self):\n if not self.isMainValid():\n return None\n \n sdMainValidity = self._getMainValidityMarker()\n return sdMainValidity.getData()",
"def validations(self) -> Sequence['outputs.Validation']:\n return pulumi.get(self, \"validations\")",
"def validation(self):\n validation_info = {}\n for _doc in self.schema_extension_only['@graph']:\n if \"$validation\" in _doc:\n data = _doc[\"$validation\"]\n if \"definitions\" in _doc[\"$validation\"]:\n data = expand_ref(data, _doc[\"$validation\"][\"definitions\"])\n validation_info[_doc[\"@id\"]] = data\n return validation_info",
"def validate(self):\n errors = {}\n for typ, items in self._items.iteritems():\n for name, spec in items.iteritems():\n assert hasattr(spec, 'validate'), 'Does %s:%s descend from FrodoBase?' % (name, spec)\n spec_errors = spec.validate()\n if spec_errors:\n errors[name] = spec_errors\n return errors\n\n # sys.modules[__name__] = Configuration()",
"def _get_index_validity(self):\n indices = dict(index for index in self._get_indices_from_payload())\n validated_indices = {}\n\n for index_name, index_def in indices.items():\n validated_indices.update({\n index_name: {\n 'valueValid': self._is_index_value_valid(index_def),\n 'optionsValid': self._is_index_options_valid(index_def),\n 'buildType': self._index_build(index_def),\n }\n })\n return validated_indices",
"def get_validate(self):\n return self.validate",
"def _evaluation():\n return {\n 'type' : 'class',\n 'name' : 'evaluation',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('description', 'str', '0.1', None),\n ('did_pass', 'bool', '0.1', None),\n ('explanation', 'str', '0.1', None),\n ('specification', 'str', '0.1', None),\n ('specification_hyperlink', 'str', '0.1', None),\n ('type', 'str', '0.1', None),\n ('type_hyperlink', 'str', '0.1', None),\n ('title', 'str', '0.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date'),\n ('description', 'gmd:evaluationMethodDescription/gco:CharacterString'),\n ('did_pass', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean'),\n ('explanation', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:explanation/gco:CharacterString'),\n ('type', 'child::gmd:result/@xlink:title'),\n ('type_hyperlink', 'child::gmd:result/@xlink:href'),\n ('specification', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:title'),\n ('specification_hyperlink', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:href'),\n ('title', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString'),\n ]\n }",
"def getValidityOfResiduesInSequence(self, seq):\n seqList = list(seq)\n aSpotted_Index = -1\n aSpotted_residue = \"\"\n if self.aligntIsDna:\n _alphabet = self.DNA_ALPHABET\n else:\n _alphabet = self.PROTEIN_ALPHABET\n # iterate over the sequence given the prior knowldege of the user\n for i in range(len(seqList)):\n residue = seqList[i]\n if str.upper(residue) not in list(_alphabet):\n aSpotted_Index = i\n aSpotted_residue = residue\n break\n rv = {\n \"residueIndex\": aSpotted_Index,\n \"residue\": aSpotted_residue,\n \"recognizedAlphabet\": self.VALID_DNA_OR_PROTEIN\n }\n if (aSpotted_residue != \"\"):\n if self.aligntIsDna:\n rv[\"recognizedAlphabet\"] = self.INVALID_DNA\n else:\n rv[\"recognizedAlphabet\"] = self.INVALID_PROTEIN\n return(rv)",
"def test_kyc_get_validation(self):\n pass",
"def validate(self):\r\n return self.specs.validate(self)",
"def to_dict_impl(cls, self: 'ErrorsAndWarnings') -> Dict[str, Any]:\n # See comment above.\n return {'errors': [e.to_dict() for e in self._errors.values() # pylint: disable=protected-access\n if e.is_persistant]}",
"def validate():",
"def _validate(self):\n pass",
"def validation(self, tokens):\n return self.process_value_pairs(tokens, \"validation\")",
"def get_safety_evaluator(self):\n constraint_names = self._meta['safety_constraints']\n safety_stats = self._stats['safety_stats']\n violations = np.sum(safety_stats['total_violations'], axis=0)\n evaluator_results = collections.OrderedDict([\n (key, violations[idx]) for idx, key in enumerate(constraint_names)\n ])\n return evaluator_results",
"def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]",
"def get_config_validity(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetConfigValidity', self.handle)",
"def validations(self) -> Optional[Sequence['outputs.ValidationPatch']]:\n return pulumi.get(self, \"validations\")"
]
| [
"0.68762314",
"0.65788156",
"0.6511472",
"0.63827586",
"0.6343963",
"0.62334585",
"0.6198675",
"0.6181131",
"0.6175592",
"0.61523205",
"0.6139875",
"0.61204463",
"0.6046817",
"0.6017709",
"0.5970631",
"0.59177464",
"0.58760285",
"0.5810927",
"0.57986134",
"0.5791891",
"0.5789971",
"0.5776535",
"0.57650524",
"0.5761378",
"0.57368475",
"0.5712917",
"0.5711977",
"0.570976",
"0.5704113",
"0.56982756"
]
| 0.7998628 | 0 |
Installs RPKI validator with our configs. This might break in the future, but we need to do it this way for now to be able to do what we want with our own prefix origin table. | def install(**kwargs):
config_logging(kwargs.get("stream_level", logging.DEBUG),
kwargs.get("section"))
utils.delete_paths([RPKI_Validator_Wrapper.rpki_package_path,
RPKI_Validator_Wrapper.temp_install_path])
RPKI_Validator_Wrapper._download_validator()
RPKI_Validator_Wrapper._change_file_hosted_location()
path = RPKI_Validator_Wrapper._change_server_address()
RPKI_Validator_Wrapper._config_absolute_paths(path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _start_validator(self):\n\n logging.info(\"Starting RPKI Validator\")\n utils.run_cmds((f\"cd {self.rpki_package_path} && \"\n f\"./{self.rpki_run_name}\"))",
"def _download_validator():\n\n rpki_url = (\"https://ftp.ripe.net/tools/rpki/validator3/beta/generic/\"\n \"rpki-validator-3-latest-dist.tar.gz\")\n arin_tal = (\"https://www.arin.net/resources/manage/rpki/\"\n \"arin-ripevalidator.tal\")\n # This is the java version they use so we will use it\n cmds = [f\"mkdir {RPKI_Validator_Wrapper.temp_install_path}\",\n f\"cd {RPKI_Validator_Wrapper.temp_install_path}\",\n \"sudo apt-get -y install openjdk-8-jre\",\n f\"wget {rpki_url}\",\n \"tar -xvf rpki-validator-3-latest-dist.tar.gz\",\n \"rm -rf rpki-validator-3-latest-dist.tar.gz\",\n f\"mv rpki-validator* {RPKI_Validator_Wrapper.rpki_package_path}\",\n f\"cd {RPKI_Validator_Wrapper.rpki_package_path}\",\n \"cd preconfigured-tals\",\n f\"wget {arin_tal}\"]\n utils.run_cmds(cmds)",
"def validateKrusty (self):\n self.mountMainPartition()\n installDictionary = self._createKrustyInstallationDictionary()\n self._writeDictionaryAsJson(installDictionary, self._getKrustyInstallationFilePath())\n self._log(\"validate-krusty\").notice(\"secure digital software (krusty) is validated\")",
"def load_validator_schema():\n logger.info('Loading validator schemas')\n SchemaLoader.load_all_from_path(validator_config_path)",
"def __validate_and_compile(self):\n self.__validate_file(self.__schema_path, \"RNG\")\n\n with open(self.__schema_path) as relax_file_handler:\n\n # Parse schema file\n relax_parsed = etree.parse(relax_file_handler)\n\n # Compile schema file\n self.__compiled = etree.RelaxNG(relax_parsed)",
"def install_schemas(setup_path, names, lp, creds, reporter):\n session_info = system_session()\n\n lp.set(\"dsdb:schema update allowed\", \"yes\")\n\n # Step 1. Extending the prefixmap attribute of the schema DN record\n names = guess_names_from_smbconf(lp, None, None)\n samdb = SamDB(url=get_ldb_url(lp, creds, names), session_info=session_info,\n credentials=creds, lp=lp)\n\n reporter.reportNextStep(\"Register Exchange OIDs\")\n\n try:\n schemadn = str(names.schemadn)\n current = samdb.search(expression=\"objectClass=classSchema\", base=schemadn,\n scope=SCOPE_BASE)\n\n schema_ldif = \"\"\n prefixmap_data = \"\"\n for ent in current:\n schema_ldif += samdb.write_ldif(ent, ldb.CHANGETYPE_NONE)\n\n prefixmap_data = open(setup_path(\"AD/prefixMap.txt\"), 'r').read()\n prefixmap_data = b64encode(prefixmap_data)\n\n # We don't actually add this ldif, just parse it\n prefixmap_ldif = \"dn: %s\\nprefixMap:: %s\\n\\n\" % (schemadn, prefixmap_data)\n dsdb._dsdb_set_schema_from_ldif(samdb, prefixmap_ldif, schema_ldif, schemadn)\n except RuntimeError as err:\n print (\"[!] error while provisioning the prefixMap: %s\"\n % str(err))\n except LdbError as err:\n print (\"[!] error while provisioning the prefixMap: %s\"\n % str(err))\n\n try:\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_attributes.ldif\", \"Add Exchange attributes to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_auxiliary_class.ldif\", \"Add Exchange auxiliary classes to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_objectCategory.ldif\", \"Add Exchange objectCategory to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_container.ldif\", \"Add Exchange containers to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_subcontainer.ldif\", \"Add Exchange *sub* containers to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_sub_CfgProtocol.ldif\", \"Add Exchange CfgProtocol subcontainers to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_sub_mailGateway.ldif\", \"Add Exchange mailGateway subcontainers to Samba schema\")\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema.ldif\", \"Add Exchange classes to Samba schema\")\n modify_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_possSuperior.ldif\", \"Add possSuperior attributes to Exchange classes\")\n modify_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_schema_modify.ldif\", \"Extend existing Samba classes and attributes\")\n except LdbError, ldb_error:\n print (\"[!] error while provisioning the Exchange\"\n \" schema classes (%d): %s\"\n % ldb_error.args)\n\n try:\n provision_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_configuration.ldif\", \"Exchange Samba with Exchange configuration objects\")\n modify_schema(setup_path, names, lp, creds, reporter, \"AD/oc_provision_configuration_finalize.ldif\", \"Finalize Exchange configuration objects\")\n print \"[SUCCESS] Done!\"\n except LdbError, ldb_error:\n print (\"[!] error while provisioning the Exchange configuration\"\n \" objects (%d): %s\" % ldb_error.args)",
"def _install(self):\n\n pass",
"def pre_installation(self):\n pass",
"def add_validators():\n vc = VimageConfig(getattr(settings, CONFIG_NAME))\n vc.add_validators()",
"def setup_schema(command, conf, vars):",
"def pre_install(self, installable_pkgs):\n pass",
"def load_trust_anchors(self):\n\n utils.write_to_stdout(f\"{datetime.now()}: Loading RPKI Validator\\n\",\n logging.root.level)\n time.sleep(60)\n while self._get_validation_status() is False:\n time.sleep(10)\n utils.write_to_stdout(\".\", logging.root.level)\n utils.write_to_stdout(\"\\n\", logging.root.level)\n self._wait(30, \"Waiting for upload to bgp preview\")",
"def base_install():\n # scwrl\n scwrl = {}\n print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\\n'\n 'All required input can use tab completion for paths.\\n'\n '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'.format(**text_colours))\n scwrl_path = get_user_path('Please provide a path to your SCWRL executable', required=False)\n scwrl['path'] = str(scwrl_path)\n pack_mode = get_user_option(\n 'Please choose your packing mode (flexible is significantly slower but is more accurate).',\n ['flexible', 'rigid'])\n if pack_mode == 'rigid':\n scwrl['rigid_rotamer_model'] = True\n else:\n scwrl['rigid_rotamer_model'] = False\n settings['scwrl'] = scwrl\n\n # dssp\n print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))\n dssp = {}\n dssp_path = get_user_path('Please provide a path to your DSSP executable.', required=False)\n dssp['path'] = str(dssp_path)\n settings['dssp'] = dssp\n\n # buff\n print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))\n buff = {}\n ffs = []\n ff_dir = isambard_path / 'buff' / 'force_fields'\n for ff_file in os.listdir(str(ff_dir)):\n ff = pathlib.Path(ff_file)\n ffs.append(ff.stem)\n force_field_choice = get_user_option(\n 'Please choose the default BUFF force field, this can be modified during runtime.',\n ffs)\n buff['default_force_field'] = force_field_choice\n settings['buff'] = buff\n return",
"def post(self):\n json_data = request.get_json()\n try:\n data = PolicyInstallRequestSchema().load(json_data)\n except ValidationError as err:\n messages = []\n for msg in err.messages:\n messages.append(\"{}: {}\".format(msg, \":\".join(err.messages[msg])))\n return messages, 422\n policy = base64.b64decode(data[\"policy\"])\n signature = base64.b64decode(data[\"signature\"])\n\n\n with open(\"{}/management.pem\".format(os.getenv(\"CONFIG_DIR\")), \"r\") as fh:\n pem = \"\".join(fh.readlines())\n key = serialization.load_pem_public_key(pem.encode(\"utf-8\"))\n\n try:\n key.verify(signature=signature, data=policy, signature_algorithm=ec.ECDSA(hashes.SHA256()))\n except InvalidSignature:\n return {\"messages\": [\"Invalid signature\"]}, 400\n \n print(\"Ok to install!\")",
"def pre_install_pkg(self, installable_pkg):\n pass",
"def CheckPrereqs():\n logging.info('entering ...')\n\n if platform.system() != 'Linux' and platform.system() != 'Darwin':\n Die('Sorry, this script assumes Linux or Mac OS X thus far. '\n 'Please feel free to edit the source and fix it to your needs.')\n\n # Ensure source files are available.\n for f in [\n 'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',\n 'package.json', 'js/engine/validator.js', 'js/engine/validator_test.js',\n 'js/engine/validator-in-browser.js', 'js/engine/tokenize-css.js',\n 'js/engine/definitions.js', 'js/engine/parse-css.js',\n 'js/engine/parse-srcset.js', 'js/engine/parse-url.js'\n ]:\n if not os.path.exists(f):\n Die('%s not found. Must run in amp_validator source directory.' % f)\n\n # Ensure protoc is available.\n try:\n libprotoc_version = subprocess.check_output(['protoc', '--version'])\n except (subprocess.CalledProcessError, OSError):\n Die('Protobuf compiler not found. Try \"apt-get install protobuf-compiler\" '\n 'or follow the install instructions at '\n 'https://github.com/ampproject/amphtml/blob/main/validator/README.md#installation.'\n )\n\n # Ensure 'libprotoc 2.5.0' or newer.\n m = re.search(b'^(\\\\w+) (\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)', libprotoc_version)\n if (m.group(1) != b'libprotoc' or\n (int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):\n Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)\n\n # Ensure that the Python protobuf package is installed.\n for m in ['descriptor', 'text_format', 'json_format']:\n module = 'google.protobuf.%s' % m\n try:\n __import__(module)\n except ImportError:\n # Python3 needs pip3. Python 2 needs pip.\n if sys.version_info < (3, 0):\n Die('%s not found. Try \"pip install protobuf\" or follow the install '\n 'instructions at https://github.com/ampproject/amphtml/blob/main/'\n 'validator/README.md#installation' % module)\n else:\n Die('%s not found. Try \"pip3 install protobuf\" or follow the install '\n 'instructions at https://github.com/ampproject/amphtml/blob/main/'\n 'validator/README.md#installation' % module)\n\n # Ensure JVM installed. TODO: Check for version?\n try:\n subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError):\n Die('Java missing. Try \"apt-get install openjdk-7-jre\" or follow the'\n 'install instructions at'\n 'https://github.com/ampproject/amphtml/blob/main/validator/README.md#installation'\n )\n logging.info('... done')",
"def install(cls):\n return cls.interface.set_table(cls.schema)",
"def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)",
"def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])",
"async def start_validators(self):\n if self.is_client():\n return\n\n await sleep(random.random() * 3)\n\n cmd = \"/home/martijn/stellar-core/stellar-core run\"\n out_file = open(\"stellar.out\", \"w\")\n self.validator_process = subprocess.Popen(cmd.split(\" \"), stdout=out_file, stderr=out_file)",
"def s_validation(path_setup=None):\n if path_setup is not None:\n # import validation setup\n fname = os.path.basename(path_setup)\n mname, ext = os.path.splitext(fname)\n val_module = imp.load_source(mname, path_setup)\n jobs, process = val_module.setup_process()\n results_path = '/data-write/RADAR/Validation_FFascetti/'\n for job in jobs:\n results = process.calc(job)\n netcdf_results_manager(results, results_path)",
"def install(self):\n raise NotImplementedError",
"def _ensure_ctypesgen(self):\n try:\n subprocess.check_call(\n ['ctypesgen.py', '--help'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except OSError:\n sys.stderr.write(\n 'ctypesgen.py not found in $PATH, attempting installation'\n )\n install_package(['ctypesgen'])\n except subprocess.CalledProcessError:\n sys.stderr.write(\n 'ctypesgen.py is installed, but not functioning properly, '\n 'consider reinstalling.\\n'\n )\n sys.exit(1)",
"def pip_install_req_file(req_file):\n pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'\n sh(f\"{pip_cmd} -r {req_file}\")",
"def dbValidator():\n fileused = getFileUsed()\n\n # Get the values we need from NuPIC's configuration\n host = Configuration.get(\"nupic.cluster.database.host\")\n port = int(Configuration.get(\"nupic.cluster.database.port\"))\n user = Configuration.get(\"nupic.cluster.database.user\")\n \n #BUG was here\n #passwd =len(Configuration.get(\"nupic.cluster.database.passwd\")) * '*'\n passwd =Configuration.get(\"nupic.cluster.database.passwd\")\n\n\n print \"This script will validate that your MySQL is setup correctly for \"\n print \"NuPIC. MySQL is required for NuPIC swarming. The settings are\"\n print \"defined in a configuration file found in \"\n print \"$NUPIC/src/nupic/support/nupic-default.xml Out of the box those \"\n print \"settings contain MySQL's default access credentials.\"\n print\n print \"The nupic-default.xml can be duplicated to define user specific \"\n print \"changes calling the copied file \"\n print \"$NUPIC/src/nupic/support/nupic-site.xml Refer to the \"\n print \"nupic-default.xml for additional instructions.\"\n print\n print \"Defaults: localhost, 3306, root, no password\"\n print\n print \"Retrieved the following NuPIC configuration using: \", fileused\n print \" host : \", host\n print \" port : \", port\n print \" user : \", user\n print \" passwd : \", len(passwd)* '*'\n \n if testDbConnection(host, port, user, passwd):\n print \"Connection successful!!\"\n else:\n print (\"Couldn't connect to the database or you don't have the \"\n \"permissions required to create databases and tables. \"\n \"Please ensure you have MySQL\\n installed, running, \"\n \"accessible using the NuPIC configuration settings, \"\n \"and the user specified has permission to create both \"\n \"databases and tables.\")",
"def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()",
"def install():\n verun('pip install -r {0}'.format(requirements))",
"def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")",
"def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")",
"def install_requirements():\n run_commands('pip install -r ./requirements/dev.txt')"
]
| [
"0.63697255",
"0.5358504",
"0.5253388",
"0.5171715",
"0.5128011",
"0.50729746",
"0.5013778",
"0.4977327",
"0.49645743",
"0.48654142",
"0.48506802",
"0.484606",
"0.47760716",
"0.46838087",
"0.4675308",
"0.4669454",
"0.46684492",
"0.46496302",
"0.4635433",
"0.46273148",
"0.4615567",
"0.4607344",
"0.45943037",
"0.45930207",
"0.45921642",
"0.45883304",
"0.45708865",
"0.4549413",
"0.4549413",
"0.45492402"
]
| 0.6363485 | 1 |
Downloads the validator into the proper location | def _download_validator():
rpki_url = ("https://ftp.ripe.net/tools/rpki/validator3/beta/generic/"
"rpki-validator-3-latest-dist.tar.gz")
arin_tal = ("https://www.arin.net/resources/manage/rpki/"
"arin-ripevalidator.tal")
# This is the java version they use so we will use it
cmds = [f"mkdir {RPKI_Validator_Wrapper.temp_install_path}",
f"cd {RPKI_Validator_Wrapper.temp_install_path}",
"sudo apt-get -y install openjdk-8-jre",
f"wget {rpki_url}",
"tar -xvf rpki-validator-3-latest-dist.tar.gz",
"rm -rf rpki-validator-3-latest-dist.tar.gz",
f"mv rpki-validator* {RPKI_Validator_Wrapper.rpki_package_path}",
f"cd {RPKI_Validator_Wrapper.rpki_package_path}",
"cd preconfigured-tals",
f"wget {arin_tal}"]
utils.run_cmds(cmds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)",
"def main():\n sandbox = create_sandbox()\n directory = download_package_to_sandbox(\n sandbox,\n 'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'\n )\n print(directory)\n destroy_sandbox(sandbox)",
"def maybe_download_and_extract(self, DATA_URL):\n\n print('Will download the pre-trained Inception Model to the same path with this validator!')\n self.Model_Save_Path = os.path.join(\"/\",\n os.getcwd(), 'DownLoaded_Inception/')\n print('Start download to ' + self.Model_Save_Path)\n\n if not os.path.exists(self.Model_Save_Path):\n os.makedirs(self.Model_Save_Path)\n\n dest_directory = self.Model_Save_Path\n\n filename = self.DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(\n DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename,\n statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def get_validation_file_path(self):\n validation_file_name = self.get_validation_file_name()\n if self.helper_decoders_one_class:\n validation_file_name = validation_file_name + \"_1\"\n\n return self.base_folder_path + \"/outputs/\" + validation_file_name + \".txt\"",
"def _start_validator(self):\n\n logging.info(\"Starting RPKI Validator\")\n utils.run_cmds((f\"cd {self.rpki_package_path} && \"\n f\"./{self.rpki_run_name}\"))",
"def _download(self) -> None:\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n download_and_extract_archive(\n self.url,\n self.root,\n filename=self.filename,\n md5=self.md5 if self.checksum else None,\n )\n\n # Generate train/val/test splits\n # Always check the sha256 of this file before executing\n # to avoid malicious code injection\n with working_dir(self.root):\n with open(\"split.py\") as f:\n split = f.read().encode(\"utf-8\")\n assert hashlib.sha256(split).hexdigest() == self.sha256\n exec(split)",
"def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))",
"def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))",
"def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)",
"def s_validation(path_setup=None):\n if path_setup is not None:\n # import validation setup\n fname = os.path.basename(path_setup)\n mname, ext = os.path.splitext(fname)\n val_module = imp.load_source(mname, path_setup)\n jobs, process = val_module.setup_process()\n results_path = '/data-write/RADAR/Validation_FFascetti/'\n for job in jobs:\n results = process.calc(job)\n netcdf_results_manager(results, results_path)",
"def download(self, download_path):\n return",
"def download(self):\n pass",
"def download(self):\n pass",
"def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)",
"def test_download(self):\n pass",
"def run_downloader(self):\n \"\"\"calls to the file downloader\"\"\"\n try:\n html = self.get_page(self.url)\n soup = self.get_soup(html)\n if soup is not None: # If we have soup -\n self.get_links(soup)\n self.get_files()\n else:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected',\n e)\n return False\n except Exception as e:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected', e)\n\n return False\n return True",
"def run(self):\n download(self.attempt)",
"def GenValidatorPb2Py(out_dir):\n logging.info('entering ...')\n assert re.match(r'^[a-zA-Z_\\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir\n\n subprocess.check_call(\n ['protoc', 'validator.proto', '--python_out=%s' % out_dir])\n open('%s/__init__.py' % out_dir, 'w').close()\n logging.info('... done')",
"def load_trust_anchors(self):\n\n utils.write_to_stdout(f\"{datetime.now()}: Loading RPKI Validator\\n\",\n logging.root.level)\n time.sleep(60)\n while self._get_validation_status() is False:\n time.sleep(10)\n utils.write_to_stdout(\".\", logging.root.level)\n utils.write_to_stdout(\"\\n\", logging.root.level)\n self._wait(30, \"Waiting for upload to bgp preview\")",
"def _download_package(self, pkg_metadata, validate=True):\n pkg_name = pkg_metadata.get('name')\n package_folder = os.path.join(self.output_folder, pkg_name)\n if os.path.isdir(package_folder):\n if self.newest:\n shutil.rmtree(package_folder)\n else:\n if self.symlink_dir and 'main' in pkg_metadata:\n self._create_symlink(package_folder, pkg_metadata['main'])\n print('\\t{} already installed, use --newest.'.format(pkg_name))\n return\n dist = pkg_metadata.get('dist')\n tar_url = dist.get('tarball')\n shasum = dist.get('shasum')\n\n print('\\tDownloading {} from {}'.format(pkg_name, tar_url))\n\n tar_data = requests.get(tar_url)\n compressed_file = BytesIO(tar_data.content)\n if validate and not self._validate_hash(compressed_file.read(), shasum):\n return None\n\n compressed_file.seek(0)\n with tarfile.open(fileobj=compressed_file, mode='r:gz') as tar:\n tar.extractall(self.output_folder)\n if os.path.isdir(os.path.join(self.output_folder, 'package')):\n # self.created(package_folder)\n shutil.move(os.path.join(self.output_folder, 'package'), package_folder)\n if self.symlink_dir and 'main' in pkg_metadata:\n self._create_symlink(package_folder, pkg_metadata['main'])",
"def __maybeDownload():\n if not os.path.isdir(Download.DATA_ROOT): # 若 data 目录不存在,创建 data 目录\n os.mkdir(Download.DATA_ROOT)\n file_path = os.path.join(Download.DATA_ROOT, Download.FILE_NAME)\n\n if os.path.exists(file_path): # 若已存在该文件\n statinfo = os.stat(file_path)\n if statinfo.st_size == Download.FILE_SIZE: # 若该文件正确,直接返回 file_path\n print('Found and verified %s' % file_path)\n return file_path\n else: # 否则,删除文件重新下载\n os.remove(file_path)\n\n download_url = Download.URL + Download.FILE_NAME\n print('Downloading %s ...' % download_url)\n filename, _ = urlretrieve(download_url, file_path) # 下载数据\n print('Finish downloading')\n\n statinfo = os.stat(filename)\n if statinfo.st_size == Download.FILE_SIZE: # 校验数据是否正确下载\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser ?')\n return filename",
"def download_build(self, name, dst_directory):\n logging.info('Not downloading build because no Filestore.')",
"def resolve(self):\n # Store the current source in the dependency object\n self.dependency.current_source = self.source\n\n # Use the first 6 characters of the SHA1 hash of the repository url\n # to uniquely identify the repository\n source_hash = hashlib.sha1(self.source.encode(\"utf-8\")).hexdigest()[:6]\n\n # The folder for storing the file\n folder_name = \"http-\" + source_hash\n folder_path = os.path.join(self.cwd, folder_name)\n\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n if self.dependency.filename:\n filename = self.dependency.filename\n else:\n filename = None\n\n file_path = self.url_download.download(\n cwd=folder_path, source=self.source, filename=filename\n )\n\n assert os.path.isfile(file_path), \"We should have a valid path here!\"\n\n return file_path",
"def _maybe_download(self, url):\n filename = os.path.basename(url)\n download_path = os.path.join(self._model_dir, filename)\n if os.path.exists(download_path):\n return download_path\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n urllib.request.urlretrieve(url, download_path, _progress)\n statinfo = os.stat(download_path)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return download_path",
"def __getFromJEMpage(self):\n\n if not self.__download(self.repo, self.version, self.lib_tar, self.dest_dir): return False\n if not self.__extract(self.lib_tar): return False\n\n if not self.__download(self.repo, self.version, self.launcher_tar, self.dest_dir): return False\n if not self.__extract(self.launcher_tar): return False\n\n self.logger.info(\"successfully downloaded and extracted JEM ver %s from repo %s\" % (self.version, self.repo))\n\n if os.path.exists(self.dest_dir + \"/JEM.py\"):\n os.environ[\"JEM_PACKAGEPATH\"] = self.dest_dir\n\n\n return True",
"def download() -> Path:\n rts_downloader.download()\n rts_gmlc_dir = Path(rts_downloader.rts_download_path) / \"RTS-GMLC\"\n return rts_gmlc_dir",
"def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)",
"def _begin_validation(\n session: UpdateSession,\n config: config.Config,\n loop: asyncio.AbstractEventLoop,\n downloaded_update_path: str,\n actions: update_actions.UpdateActionsInterface,\n) -> \"asyncio.futures.Future[Optional[str]]\":\n session.set_stage(Stages.VALIDATING)\n cert_path = config.update_cert_path if config.signature_required else None\n\n validation_future = asyncio.ensure_future(\n loop.run_in_executor(\n None,\n actions.validate_update,\n downloaded_update_path,\n session.set_progress,\n cert_path,\n )\n )\n\n def validation_done(fut):\n exc = fut.exception()\n if exc:\n session.set_error(getattr(exc, \"short\", str(type(exc))), str(exc))\n else:\n rootfs_file = fut.result()\n loop.call_soon_threadsafe(_begin_write, session, loop, rootfs_file, actions)\n\n validation_future.add_done_callback(validation_done)\n return validation_future"
]
| [
"0.5852008",
"0.5642754",
"0.5526515",
"0.5338583",
"0.53289795",
"0.5327149",
"0.5300025",
"0.5300025",
"0.52865535",
"0.52240986",
"0.522238",
"0.51255095",
"0.51255095",
"0.51233405",
"0.5114986",
"0.5091691",
"0.5079647",
"0.5037147",
"0.5001166",
"0.49831185",
"0.49790952",
"0.49420977",
"0.49347186",
"0.49323568",
"0.49241662",
"0.4911217",
"0.49090663",
"0.48803863",
"0.48794177",
"0.48705354"
]
| 0.7724067 | 0 |
Configures the RPKI validator to run off absolute paths. This is necessary because the script is called from elsewhere, in other words not from inside the RPKI dir. | def _config_absolute_paths(path):
# Since I am calling the script from elsewhere these must be
# absolute paths
prepend = "rpki.validator.data.path="
replace = "."
# Must remove trailing backslash at the end
replace_with = RPKI_Validator_Wrapper.rpki_package_path[:-1]
utils.replace_line(path, prepend, replace, replace_with)
prepend = "rpki.validator.preconfigured.trust.anchors.directory="
replace = "./preconfigured-tals"
replace_with = (f"{RPKI_Validator_Wrapper.rpki_package_path}"
"preconfigured-tals")
utils.replace_line(path, prepend, replace, replace_with)
prepend = "rpki.validator.rsync.local.storage.directory="
replace = "./rsync"
replace_with = f"{RPKI_Validator_Wrapper.rpki_package_path}rsync"
utils.replace_line(path, prepend, replace, replace_with) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_paths(self):\n # When we're started with a *.qemuboot.conf arg assume that image\n # artefacts are relative to that file, rather than in whatever\n # directory DEPLOY_DIR_IMAGE in the conf file points to.\n if self.qbconfload:\n imgdir = os.path.realpath(os.path.dirname(self.qemuboot))\n if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')):\n logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))\n self.set('DEPLOY_DIR_IMAGE', imgdir)\n\n # If the STAGING_*_NATIVE directories from the config file don't exist\n # and we're in a sourced OE build directory try to extract the paths\n # from `bitbake -e`\n havenative = os.path.exists(self.get('STAGING_DIR_NATIVE')) and \\\n os.path.exists(self.get('STAGING_BINDIR_NATIVE'))\n\n if not havenative:\n if not self.bitbake_e:\n self.load_bitbake_env()\n\n if self.bitbake_e:\n native_vars = ['STAGING_DIR_NATIVE']\n for nv in native_vars:\n s = re.search('^%s=\"(.*)\"' % nv, self.bitbake_e, re.M)\n if s and s.group(1) != self.get(nv):\n logger.info('Overriding conf file setting of %s to %s from Bitbake environment' % (nv, s.group(1)))\n self.set(nv, s.group(1))\n else:\n # when we're invoked from a running bitbake instance we won't\n # be able to call `bitbake -e`, then try:\n # - get OE_TMPDIR from environment and guess paths based on it\n # - get OECORE_NATIVE_SYSROOT from environment (for sdk)\n tmpdir = self.get('OE_TMPDIR')\n oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT')\n if tmpdir:\n logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)\n hostos, _, _, _, machine = os.uname()\n buildsys = '%s-%s' % (machine, hostos.lower())\n staging_dir_native = '%s/sysroots/%s' % (tmpdir, buildsys)\n self.set('STAGING_DIR_NATIVE', staging_dir_native)\n elif oecore_native_sysroot:\n logger.info('Setting STAGING_DIR_NATIVE to OECORE_NATIVE_SYSROOT (%s)' % oecore_native_sysroot)\n self.set('STAGING_DIR_NATIVE', oecore_native_sysroot)\n if self.get('STAGING_DIR_NATIVE'):\n # we have to assume that STAGING_BINDIR_NATIVE is at usr/bin\n staging_bindir_native = '%s/usr/bin' % self.get('STAGING_DIR_NATIVE')\n logger.info('Setting STAGING_BINDIR_NATIVE to %s' % staging_bindir_native)\n self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))",
"def _start_validator(self):\n\n logging.info(\"Starting RPKI Validator\")\n utils.run_cmds((f\"cd {self.rpki_package_path} && \"\n f\"./{self.rpki_run_name}\"))",
"def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))",
"def sanity_check_step(self):\n\n path = 'trinityrnaseq_r%s' % self.version\n\n # these lists are definitely non-exhaustive, but better than nothing\n custom_paths = {\n 'files': [os.path.join(path, x) for x in ['Inchworm/bin/inchworm', 'Chrysalis/Chrysalis']],\n 'dirs': [os.path.join(path, x) for x in ['Butterfly/src/bin', 'util']]\n }\n\n super(EB_Trinity, self).sanity_check_step(custom_paths=custom_paths)",
"def sanity_check_step(self):\n custom_paths = {\n 'files':[\"%s/%s\" % (self.bindir, x) for x in [\"convert\", \"cplex\", \"cplexamp\"]],\n 'dirs':[],\n }\n super(EB_CPLEX, self).sanity_check_step(custom_paths=custom_paths)",
"def _download_validator():\n\n rpki_url = (\"https://ftp.ripe.net/tools/rpki/validator3/beta/generic/\"\n \"rpki-validator-3-latest-dist.tar.gz\")\n arin_tal = (\"https://www.arin.net/resources/manage/rpki/\"\n \"arin-ripevalidator.tal\")\n # This is the java version they use so we will use it\n cmds = [f\"mkdir {RPKI_Validator_Wrapper.temp_install_path}\",\n f\"cd {RPKI_Validator_Wrapper.temp_install_path}\",\n \"sudo apt-get -y install openjdk-8-jre\",\n f\"wget {rpki_url}\",\n \"tar -xvf rpki-validator-3-latest-dist.tar.gz\",\n \"rm -rf rpki-validator-3-latest-dist.tar.gz\",\n f\"mv rpki-validator* {RPKI_Validator_Wrapper.rpki_package_path}\",\n f\"cd {RPKI_Validator_Wrapper.rpki_package_path}\",\n \"cd preconfigured-tals\",\n f\"wget {arin_tal}\"]\n utils.run_cmds(cmds)",
"def getXsdValidationBasePath(self):\n pass;",
"def install(**kwargs):\n\n config_logging(kwargs.get(\"stream_level\", logging.DEBUG),\n kwargs.get(\"section\"))\n utils.delete_paths([RPKI_Validator_Wrapper.rpki_package_path,\n RPKI_Validator_Wrapper.temp_install_path])\n\n RPKI_Validator_Wrapper._download_validator()\n RPKI_Validator_Wrapper._change_file_hosted_location()\n path = RPKI_Validator_Wrapper._change_server_address()\n RPKI_Validator_Wrapper._config_absolute_paths(path)",
"def __init__(self, conf_file, syspaths):\n super(UdevNetRulesFile, self).__init__(conf_file, syspaths,\n UdevNetEntry)",
"def make_paths_absolute(dir_, cfg):\n for key in cfg.keys():\n if key.endswith(\"_path\"):\n cfg[key] = os.path.join(dir_, cfg[key])\n cfg[key] = os.path.abspath(cfg[key])\n if not os.path.isfile(cfg[key]):\n logging.error(\"%s does not exist.\", cfg[key])\n if type(cfg[key]) is dict:\n cfg[key] = make_paths_absolute(dir_, cfg[key])\n return cfg",
"def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)",
"def load_trust_anchors(self):\n\n utils.write_to_stdout(f\"{datetime.now()}: Loading RPKI Validator\\n\",\n logging.root.level)\n time.sleep(60)\n while self._get_validation_status() is False:\n time.sleep(10)\n utils.write_to_stdout(\".\", logging.root.level)\n utils.write_to_stdout(\"\\n\", logging.root.level)\n self._wait(30, \"Waiting for upload to bgp preview\")",
"def configure(_workdir):\n\n global workdir\n workdir = _workdir\n\n from os.path import join\n from ConfigParser import ConfigParser\n config = ConfigParser(dict(here=workdir))\n config.read(join(workdir, 'rnaseqlyze.ini'))\n\n for name, value in config.items(\"rnaseqlyze\"):\n globals()[name] = value\n\n import Bio.Entrez\n Bio.Entrez.email = admin_email",
"def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path",
"def _validate_path(validation_context, path, end_entity_name_override=None):\n\n if not isinstance(path, ValidationPath):\n raise TypeError(pretty_message(\n '''\n path must be an instance of certvalidator.path.ValidationPath,\n not %s\n ''',\n type_name(path)\n ))\n\n if not isinstance(validation_context, ValidationContext):\n raise TypeError(pretty_message(\n '''\n validation_context must be an instance of\n certvalidator.context.ValidationContext, not %s\n ''',\n type_name(validation_context)\n ))\n\n moment = validation_context.moment\n\n if end_entity_name_override is not None and not isinstance(end_entity_name_override, str_cls):\n raise TypeError(pretty_message(\n '''\n end_entity_name_override must be a unicode string, not %s\n ''',\n type_name(end_entity_name_override)\n ))\n\n # Inputs\n\n trust_anchor = path.first\n\n # We skip the trust anchor when measuring the path since technically\n # the trust anchor is not part of the path\n path_length = len(path) - 1\n\n # We don't accept any certificate policy or name constraint values as input\n # and instead just start allowing everything during initialization\n\n # Step 1: initialization\n\n # Step 1 a\n valid_policy_tree = PolicyTreeRoot('any_policy', set(), set(['any_policy']))\n\n # Steps 1 b-c skipped since they relate to name constraints\n\n # Steps 1 d-f\n # We do not use initial-explicit-policy, initial-any-policy-inhibit or\n # initial-policy-mapping-inhibit, so they are all set to the path length + 1\n explicit_policy = path_length + 1\n inhibit_any_policy = path_length + 1\n policy_mapping = path_length + 1\n\n # Steps 1 g-i\n working_public_key = trust_anchor.public_key\n # Step 1 j\n working_issuer_name = trust_anchor.subject\n # Step 1 k\n max_path_length = path_length\n if trust_anchor.max_path_length is not None:\n max_path_length = trust_anchor.max_path_length\n\n # Step 2: basic processing\n index = 1\n last_index = len(path) - 1\n\n completed_path = ValidationPath(trust_anchor)\n validation_context.record_validation(trust_anchor, completed_path)\n\n cert = trust_anchor\n while index <= last_index:\n cert = path[index]\n\n # Step 2 a 1\n signature_algo = cert['signature_algorithm'].signature_algo\n hash_algo = cert['signature_algorithm'].hash_algo\n\n if hash_algo in validation_context.weak_hash_algos:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because the signature of %s\n uses the weak hash algorithm %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True),\n hash_algo\n ))\n\n if signature_algo == 'rsassa_pkcs1v15':\n verify_func = asymmetric.rsa_pkcs1v15_verify\n elif signature_algo == 'dsa':\n verify_func = asymmetric.dsa_verify\n elif signature_algo == 'ecdsa':\n verify_func = asymmetric.ecdsa_verify\n else:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because the signature of %s\n uses the unsupported algorithm %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True),\n signature_algo\n ))\n\n try:\n key_object = asymmetric.load_public_key(working_public_key)\n verify_func(key_object, cert['signature_value'].native, cert['tbs_certificate'].dump(), hash_algo)\n\n except (oscrypto.errors.SignatureError):\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because the signature of %s\n could not be verified\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True)\n ))\n\n # Step 2 a 2\n if not 
validation_context.is_whitelisted(cert):\n validity = cert['tbs_certificate']['validity']\n if moment < validity['not_before'].native:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because %s is not valid\n until %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True),\n validity['not_before'].native.strftime('%Y-%m-%d %H:%M:%SZ')\n ))\n if moment > validity['not_after'].native:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because %s expired %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True),\n validity['not_after'].native.strftime('%Y-%m-%d %H:%M:%SZ')\n ))\n\n # Step 2 a 3 - CRL/OCSP\n if not validation_context._skip_revocation_checks:\n status_good = False\n revocation_check_failed = False\n matched = False\n soft_fail = False\n failures = []\n\n if cert.ocsp_urls or validation_context.revocation_mode == 'require':\n try:\n verify_ocsp_response(\n cert,\n path,\n validation_context,\n cert_description=_cert_type(\n index,\n last_index,\n end_entity_name_override,\n definite=True\n ),\n end_entity_name_override=end_entity_name_override\n )\n status_good = True\n matched = True\n except (OCSPValidationIndeterminateError) as e:\n failures.extend([failure[0] for failure in e.failures])\n revocation_check_failed = True\n matched = True\n except (SoftFailError):\n soft_fail = True\n except (OCSPNoMatchesError):\n pass\n\n if not status_good and (cert.crl_distribution_points or validation_context.revocation_mode == 'require'):\n try:\n cert_description = _cert_type(index, last_index, end_entity_name_override, definite=True)\n verify_crl(\n cert,\n path,\n validation_context,\n cert_description=cert_description,\n end_entity_name_override=end_entity_name_override\n )\n revocation_check_failed = False\n status_good = True\n matched = True\n except (CRLValidationIndeterminateError) as e:\n failures.extend([failure[0] for failure in e.failures])\n revocation_check_failed = True\n matched = True\n except (SoftFailError):\n soft_fail = True\n except (CRLNoMatchesError):\n pass\n\n if not soft_fail:\n if not matched and validation_context.revocation_mode == 'require':\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because no revocation\n information could be found for %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True)\n ))\n\n if not status_good and revocation_check_failed:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because the %s revocation\n checks failed: %s\n ''',\n _cert_type(index, last_index, end_entity_name_override),\n '; '.join(failures)\n ))\n\n # Step 2 a 4\n if cert.issuer != working_issuer_name:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because the %s issuer name\n could not be matched\n ''',\n _cert_type(index, last_index, end_entity_name_override),\n ))\n\n # Steps 2 b-c skipped since they relate to name constraints\n\n # Steps 2 d\n if cert.certificate_policies_value and valid_policy_tree is not None:\n\n cert_any_policy = None\n cert_policy_identifiers = set()\n\n # Step 2 d 1\n for policy in cert.certificate_policies_value:\n policy_identifier = policy['policy_identifier'].native\n\n if policy_identifier == 'any_policy':\n cert_any_policy = policy\n continue\n\n cert_policy_identifiers.add(policy_identifier)\n\n policy_qualifiers = policy['policy_qualifiers']\n\n policy_id_match = False\n 
parent_any_policy = None\n\n # Step 2 d 1 i\n for node in valid_policy_tree.at_depth(index - 1):\n if node.valid_policy == 'any_policy':\n parent_any_policy = node\n if policy_identifier not in node.expected_policy_set:\n continue\n policy_id_match = True\n node.add_child(\n policy_identifier,\n policy_qualifiers,\n set([policy_identifier])\n )\n\n # Step 2 d 1 ii\n if not policy_id_match and parent_any_policy:\n parent_any_policy.add_child(\n policy_identifier,\n policy_qualifiers,\n set([policy_identifier])\n )\n\n # Step 2 d 2\n if cert_any_policy and (inhibit_any_policy > 0 or (index < path_length and cert.self_issued)):\n for node in valid_policy_tree.at_depth(index - 1):\n for expected_policy_identifier in node.expected_policy_set:\n if expected_policy_identifier not in cert_policy_identifiers:\n node.add_child(\n expected_policy_identifier,\n cert_any_policy['policy_qualifiers'],\n set([expected_policy_identifier])\n )\n\n # Step 2 d 3\n for node in valid_policy_tree.walk_up(index - 1):\n if not node.children:\n node.parent.remove_child(node)\n if len(valid_policy_tree.children) == 0:\n valid_policy_tree = None\n\n # Step 2 e\n if cert.certificate_policies_value is None:\n valid_policy_tree = None\n\n # Step 2 f\n if valid_policy_tree is None and explicit_policy <= 0:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because there is no valid set\n of policies for %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True),\n ))\n\n if index != last_index:\n # Step 3: prepare for certificate index+1\n\n if cert.policy_mappings_value:\n policy_map = {}\n for mapping in cert.policy_mappings_value:\n issuer_domain_policy = mapping['issuer_domain_policy'].native\n subject_domain_policy = mapping['subject_domain_policy'].native\n\n if issuer_domain_policy not in policy_map:\n policy_map[issuer_domain_policy] = set()\n policy_map[issuer_domain_policy].add(subject_domain_policy)\n\n # Step 3 a\n if issuer_domain_policy == 'any_policy' or subject_domain_policy == 'any_policy':\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because %s contains\n a policy mapping for the \"any policy\"\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True)\n ))\n\n # Step 3 b\n if valid_policy_tree is not None:\n for mapping in cert.policy_mappings_value:\n issuer_domain_policy = mapping['issuer_domain_policy'].native\n\n # Step 3 b 1\n if policy_mapping > 0:\n issuer_domain_policy_match = False\n cert_any_policy = None\n\n for node in valid_policy_tree.at_depth(index):\n if node.valid_policy == 'any_policy':\n cert_any_policy = node\n if node.valid_policy == issuer_domain_policy:\n issuer_domain_policy_match = True\n node.expected_policy_set = policy_map[issuer_domain_policy]\n\n if not issuer_domain_policy_match and cert_any_policy:\n cert_any_policy.parent.add_child(\n issuer_domain_policy,\n cert_any_policy.qualifier_set,\n policy_map[issuer_domain_policy]\n )\n\n # Step 3 b 2\n elif policy_mapping == 0:\n for node in valid_policy_tree.at_depth(index):\n if node.valid_policy == issuer_domain_policy:\n node.parent.remove_child(node)\n for node in valid_policy_tree.walk_up(index - 1):\n if not node.children:\n node.parent.remove_child(node)\n if len(valid_policy_tree.children) == 0:\n valid_policy_tree = None\n\n # Step 3 c\n working_issuer_name = cert.subject\n\n # Steps 3 d-f\n\n # Handle inheritance of DSA parameters from a signing CA to the\n # next in the chain\n copy_params = None\n 
if cert.public_key.algorithm == 'dsa' and cert.public_key.hash_algo is None:\n if working_public_key.algorithm == 'dsa':\n copy_params = working_public_key['algorithm']['parameters'].copy()\n\n working_public_key = cert.public_key\n\n if copy_params:\n working_public_key['algorithm']['parameters'] = copy_params\n\n # Step 3 g skipped since it relates to name constraints\n\n # Step 3 h\n if not cert.self_issued:\n # Step 3 h 1\n if explicit_policy != 0:\n explicit_policy -= 1\n # Step 3 h 2\n if policy_mapping != 0:\n policy_mapping -= 1\n # Step 3 h 3\n if inhibit_any_policy != 0:\n inhibit_any_policy -= 1\n\n # Step 3 i\n if cert.policy_constraints_value:\n # Step 3 i 1\n require_explicit_policy = cert.policy_constraints_value['require_explicit_policy'].native\n if require_explicit_policy is not None and require_explicit_policy < explicit_policy:\n explicit_policy = require_explicit_policy\n # Step 3 i 2\n inhibit_policy_mapping = cert.policy_constraints_value['inhibit_policy_mapping'].native\n if inhibit_policy_mapping is not None and inhibit_policy_mapping < policy_mapping:\n policy_mapping = inhibit_policy_mapping\n\n # Step 3 j\n if cert.inhibit_any_policy_value:\n inhibit_any_policy = min(cert.inhibit_any_policy_value.native, inhibit_any_policy)\n\n # Step 3 k\n if not cert.ca:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because %s is not a CA\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True)\n ))\n\n # Step 3 l\n if not cert.self_issued:\n if max_path_length == 0:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because it exceeds the\n maximum path length\n '''\n ))\n max_path_length -= 1\n\n # Step 3 m\n if cert.max_path_length is not None and cert.max_path_length < max_path_length:\n max_path_length = cert.max_path_length\n\n # Step 3 n\n if cert.key_usage_value and 'key_cert_sign' not in cert.key_usage_value.native:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because %s is not allowed\n to sign certificates\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True)\n ))\n\n # Step 3 o\n # Check for critical unsupported extensions\n supported_extensions = set([\n 'authority_information_access',\n 'authority_key_identifier',\n 'basic_constraints',\n 'crl_distribution_points',\n 'extended_key_usage',\n 'freshest_crl',\n 'key_identifier',\n 'key_usage',\n 'ocsp_no_check',\n 'certificate_policies',\n 'policy_mappings',\n 'policy_constraints',\n 'inhibit_any_policy',\n ])\n unsupported_critical_extensions = cert.critical_extensions - supported_extensions\n if unsupported_critical_extensions:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because %s contains the\n following unsupported critical extension%s: %s\n ''',\n _cert_type(index, last_index, end_entity_name_override, definite=True),\n 's' if len(unsupported_critical_extensions) != 1 else '',\n ', '.join(sorted(unsupported_critical_extensions)),\n ))\n\n if validation_context:\n completed_path = completed_path.copy().append(cert)\n validation_context.record_validation(cert, completed_path)\n\n index += 1\n\n # Step 4: wrap-up procedure\n\n # Step 4 a\n if explicit_policy != 0:\n explicit_policy -= 1\n\n # Step 4 b\n if cert.policy_constraints_value:\n if cert.policy_constraints_value['require_explicit_policy'].native == 0:\n explicit_policy = 0\n\n # Steps 4 c-e skipped since this method doesn't output it\n # Step 4 f skipped 
since this method defers that to the calling application\n\n # Step 4 g\n\n # Step 4 g i\n if valid_policy_tree is None:\n intersection = None\n\n # Step 4 g ii\n else:\n intersection = valid_policy_tree\n\n # Step 4 g iii is skipped since the initial policy set is always any_policy\n\n if explicit_policy == 0 and intersection is None:\n raise PathValidationError(pretty_message(\n '''\n The path could not be validated because there is no valid set of\n policies for %s\n ''',\n _cert_type(last_index, last_index, end_entity_name_override, definite=True)\n ))\n\n return cert",
"def _set_rel_paths(self):\n if self.working_dir is not None:\n self._rel_working_dir = os.path.relpath(self.working_dir)\n if self.alignment is not None:\n self._rel_alignment = os.path.relpath(self.alignment, \n self.working_dir)\n if self.out_file is not None:\n self._rel_out_file = os.path.relpath(self.out_file, \n self.working_dir)",
"def test_set_default_verify_paths(self):\n # Testing this requires a server with a certificate signed by one\n # of the CAs in the platform CA location. Getting one of those\n # costs money. Fortunately (or unfortunately, depending on your\n # perspective), it's easy to think of a public server on the\n # internet which has such a certificate. Connecting to the network\n # in a unit test is bad, but it's the only way I can think of to\n # really test this. -exarkun\n context = Context(SSLv23_METHOD)\n context.set_default_verify_paths()\n context.set_verify(\n VERIFY_PEER,\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n\n client = socket_any_family()\n client.connect((\"encrypted.google.com\", 443))\n clientSSL = Connection(context, client)\n clientSSL.set_connect_state()\n clientSSL.set_tlsext_host_name(b\"encrypted.google.com\")\n clientSSL.do_handshake()\n clientSSL.send(b\"GET / HTTP/1.0\\r\\n\\r\\n\")\n assert clientSSL.recv(1024)",
"def s_validation(path_setup=None):\n if path_setup is not None:\n # import validation setup\n fname = os.path.basename(path_setup)\n mname, ext = os.path.splitext(fname)\n val_module = imp.load_source(mname, path_setup)\n jobs, process = val_module.setup_process()\n results_path = '/data-write/RADAR/Validation_FFascetti/'\n for job in jobs:\n results = process.calc(job)\n netcdf_results_manager(results, results_path)",
"def sanity_check_step(self):\n\n ver = 'v%s' % ''.join(self.version.split('.'))\n\n custom_paths = {\n 'files': [\"%s/fluent/bin/fluent%s\" % (ver, x) for x in ['', '_arch', '_sysinfo']],\n 'dirs': [\"%s/%s\" % (ver, x) for x in [\"ansys\", \"aisol\", \"CFD-Post\"]]\n }\n\n super(EB_FLUENT, self).sanity_check_step(custom_paths=custom_paths)",
"def valid_rsa_keypair_paths( config_path, prefix, force ) :\n\n public_key_path, private_key_path = rsa_keypair_paths( config_path, prefix )\n if not force and ( os.path.isfile( public_key_path ) or os.path.isfile( private_key_path ) ) :\n raise ValueError( \"public or private keyfile already exists\" )\n return public_key_path, private_key_path",
"def makeRelativePathsAbsolute(cmdargs):\n for i in range(len(cmdargs)):\n if relativePathRE.match(cmdargs[i]):\n cmdargs[i]=os.path.abspath(cmdargs[i])",
"def get_checks_path():\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, \"checks\")\n return os.path.abspath(os.path.join(__file__, rel_path))",
"def test_get_pyrin_root_path():\n\n root_path = os.path.abspath('.')\n assert application_services.get_pyrin_root_path() == root_path",
"def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())",
"def initialize_paths(self):\n for path in self.config[\"paths\"]:\n self.force_path_to_exist(self.config[\"paths\"][path])",
"def _change_file_hosted_location():\n\n # Changes where the file is hosted\n path = (f\"{RPKI_Validator_Wrapper.rpki_package_path}conf\"\n \"/application-defaults.properties\")\n prepend = \"rpki.validator.bgp.ris.dump.urls=\"\n replace = (\"https://www.ris.ripe.net/dumps/riswhoisdump.IPv4.gz,\"\n \"https://www.ris.ripe.net/dumps/riswhoisdump.IPv6.gz\")\n replace_with = (f\"http://localhost:{RPKI_File.port}\"\n f\"/{RPKI_File.hosted_name}\")\n utils.replace_line(path, prepend, replace, replace_with)",
"def __init__(self, syspaths):\n super(UdevNetRulesFile71, self).__init__(syspaths.rules_file_71,\n syspaths)",
"def resource_path(relative_path):\n base_path= getattr(sys,'MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)",
"def create_tester_paths():\n config.config_tester()\n _create_paths(vmcheckerpaths.tester_paths())",
"def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()"
]
| [
"0.56588614",
"0.54540795",
"0.5439257",
"0.5435881",
"0.5407553",
"0.53129953",
"0.5268416",
"0.52518916",
"0.5166516",
"0.5107923",
"0.5076477",
"0.5066876",
"0.5058214",
"0.50226915",
"0.49473757",
"0.49465066",
"0.49405462",
"0.49361816",
"0.4921926",
"0.48854834",
"0.4866548",
"0.4858135",
"0.4834946",
"0.4798718",
"0.47935104",
"0.4785114",
"0.47805277",
"0.4777894",
"0.47691396",
"0.47681335"
]
| 0.71353936 | 0 |
Retrieves an xy slice at a wavelength specified by the cube's primary wavelength plus the given offset. | def _choose_wavelength_slice(self, offset):
if 'WAVE' not in self.axes_wcs.wcs.ctype:
raise cu.CubeError(2, "Spectral dimension not present")
if self.data.ndim == 4:
raise cu.CubeError(4, "Can only work with 3D cubes")
axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1
arr = None
length = self.data.shape[axis]
if isinstance(offset, int) and offset >= 0 and offset < length:
arr = self.data.take(offset, axis=axis)
if isinstance(offset, u.Quantity):
delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m
wloffset = offset.to(u.m) / delta
wloffset = int(wloffset)
if wloffset >= 0 and wloffset < self.data.shape[axis]:
arr = self.data.take(wloffset, axis=axis)
return arr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _choose_x_slice(self, offset):\n arr = None\n axis = 0\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n unit = self.axes_wcs.wcs.cunit[-1]\n delta = self.axes_wcs.wcs.cdelt[-1] * unit\n wloffset = offset.to(unit) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr",
"def plot_wavelength_slice(self, offset, **kwargs):\n cumul_cube_lengths = np.cumsum(np.array([c.shape[self.common_axis]\n for c in self.data]))\n sequence_index, cube_index = cu._convert_cube_like_index_to_sequence_indices(\n offset, cumul_cube_lengths)\n plot = self[sequence_index].plot_wavelength_slice(cube_index, **kwargs)\n return plot",
"def get_slice(self, limit, offset):\r\n if limit == 0:\r\n return self.objects[offset:]\r\n\r\n return self.objects[offset:offset + limit]",
"def get_slice(self):\n return self.locs[tuple(self.indices), :]",
"def getSlice(properties=None, **kw):",
"def relay_array_getitem(c, a, start, stop, strides):\n assert start.is_constant(tuple)\n assert stop.is_constant(tuple)\n assert strides.is_constant(tuple)\n return relay.op.transform.strided_slice(c.ref(a), start.value, stop.value,\n strides.value)",
"def plot_x_slice(self, offset, **kwargs):\n cumul_cube_lengths = np.cumsum(np.array([c.shape[self.common_axis]\n for c in self.data]))\n sequence_index, cube_index = cu._convert_cube_like_index_to_sequence_indices(\n offset, cumul_cube_lengths)\n plot = self[sequence_index].plot_x_slice(cube_index, **kwargs)\n return plot",
"def offset(self, offset):\n return Line(self.p + offset * self.cross_z.normalized(), self.v)",
"def offset(self, offset):\n return Line3d(self.p + offset * self.cross.normalized(), self.v)",
"def get2DSlice( self, slices: list ):\n assert(len(slices)==self._nDims-2)\n slices.extend([slice(self._nGlobalCoords[self._layout.dims_order[-2]]),\n slice(self._nGlobalCoords[self._layout.dims_order[-1]])])\n return self._f[tuple(slices)]",
"def Offset_Data(x, offset):\r\n\r\n import numpy as np;\r\n \r\n offset = np.ones(x.shape)*offset;\r\n x = x-offset;\r\n return x;",
"def get_offset(self, tuple_of_slices, shape):\n raise NotImplementedError()",
"def get1DSlice( self, slices: list ):\n assert(len(slices)==self._nDims-1)\n slices.append(slice(self._nGlobalCoords[self._layout.dims_order[-1]]))\n return self._f[tuple(slices)]",
"def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart",
"def offsetpolygon(polyx, offset):\n polyy = []\n # need three points at a time\n for counter in range(0, len(polyx) - 3):\n # get first offset intercept\n pt = getpt(polyx[counter],\n polyx[counter + 1],\n polyx[counter + 2],\n offset)\n # append new point to polyy\n polyy.append(pt)\n # last three points\n pt = getpt(polyx[-3], polyx[-2], polyx[-1], offset)\n polyy.append(pt)\n pt = getpt(polyx[-2], polyx[-1], polyx[0], offset)\n polyy.append(pt)\n pt = getpt(polyx[-1], polyx[0], polyx[1], offset)\n polyy.append(pt)\n return polyy",
"def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))",
"def get_slice(x, indices):\n return x[indices]",
"def slice(\n sample, \n ei, psi_axis,\n hkl0, hkl_dir, x_axis, \n instrument, erange,\n out):\n from mcvine.workflow.sample import loadSampleYml\n sample = loadSampleYml(sample)\n code = \"from mcvine.workflow.DGS import %s as mod\" % instrument\n d = {}; exec(code, d); mod = d['mod']\n psi_angles = np.arange(*tuple(psi_axis))\n x_axis = np.arange(*tuple(x_axis))\n from matplotlib import pyplot as plt\n plt.figure()\n from ...singlextal import dynrange\n dynrange.plotDynRangeOfSlice(\n sample, psi_angles, ei, hkl0, hkl_dir, x_axis,\n mod.scattering_angle_constraints,\n Erange=erange)\n if out:\n plt.savefig(out)\n else:\n plt.show()\n return",
"def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)",
"def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)",
"def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]",
"def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))",
"def xyz(self, offset=None, offsettype=None):\n if self._xyz is None:\n raise RuntimeError(\"Coordinates not defined for station \" + self._code)\n if offset is None:\n return self._xyz\n if offsettype is None:\n offsettype = Station.OFFSET_H\n if offsettype == Station.OFFSET_H:\n offset = [0, 0, offset]\n if offsettype == Station.OFFSET_H or offsettype == Station.OFFSET_GENU:\n offset = np.array(offset).dot(self._genu)\n elif offsettype == Station.OFFSET_ENU:\n offset = np.array(offset).dot(self._enu)\n elif offsettype != Station.OFFSET_XYZ:\n raise RuntimeError(\"Invalid offset type in Station.xyz\")\n return self._xyz + offset",
"def carve_slice(\n self, x_index=0, width=config()[\"panel\"][\"width\"],\n ):\n piece = []\n for row in self.grid:\n piece.append(row[x_index : x_index + width])\n\n return piece",
"def get_slice(self, n):\n if n == 0:\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)\n raise IndexError(f\"{n} is invalid for a 1 dimension Slice \")",
"def plot_wavelength_slice(self, offset, axes=None, style='imshow', **kwargs):\n if axes is None:\n axes = wcsaxes_compat.gca_wcs(self.axes_wcs, slices=(\"x\", \"y\", offset))\n\n data = self._choose_wavelength_slice(offset)\n if data is None:\n data = self._choose_wavelength_slice(0)\n\n if style == 'imshow':\n plot = axes.imshow(data, **kwargs)\n elif style == 'pcolormesh':\n plot = axes.pcolormesh(data, **kwargs)\n\n return plot",
"def __getslice__(self,i,j):\n return self.x[i:j]",
"def getxxslice(self,whichsol_,first_,last_,xx_):\n _xx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and xx_ is not None and len(xx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),((last_) - (first_))))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise ValueError(\"Argument xx must be writable\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n res = __library__.MSK_XX_getxxslice(self.__nativep,whichsol_,first_,last_,_xx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp",
"def slice(self,x=None,y=None,z=None,c=None):\n\n if x is None:\n x = slice(0,self._metaData[\"size_x\"])\n if y is None:\n y = slice(0,self._metaData[\"size_y\"])\n if z is None:\n z = slice(0,self._metaData[\"size_z\"])\n if c is None:\n c = slice(0,self._metaData[\"size_c\"])\n\n img_crop=MicImage()\n img_crop._metaData={**self._metaData}\n img_crop.xml=self.xml\n\n\n img_crop.pixels= copy.deepcopy(self.pixels[z,x,y,c])\n \n if img_crop.pixels.shape[0]==1:\n img_crop.pixels=np.squeeze(img_crop.pixels)\n img_crop.sumprj=np.squeeze(img_crop.pixels)\n img_crop.maxprj=np.squeeze(img_crop.pixels)\n else:\n img_crop.sumprj=(np.sum(img_crop.pixels,axis=0))\n img_crop.maxprj=(np.amax(img_crop.pixels,axis=0))\n img_crop.meanprj=(np.mean(img_crop.pixels,axis=0))\n\n img_crop._metaData.update({\"size_z\": img_crop.pixels.shape[0]})\n img_crop._metaData.update({\"size_x\": img_crop.pixels.shape[1]})\n img_crop._metaData.update({\"size_y\": img_crop.pixels.shape[2]})\n img_crop._metaData.update({\"size_c\": img_crop.pixels.shape[3]})\n\n return img_crop",
"def subrange(x, onset=None, offset=None):\n return (y[(y >= onset) & ~(y > (offset))] for y in x)"
]
| [
"0.6906233",
"0.60154533",
"0.57687914",
"0.56916517",
"0.5654022",
"0.55755776",
"0.55728334",
"0.55718255",
"0.5528963",
"0.5498326",
"0.54335445",
"0.5349252",
"0.53329015",
"0.5331418",
"0.53038037",
"0.52538073",
"0.52528626",
"0.52475715",
"0.5232891",
"0.52242994",
"0.52166504",
"0.51746845",
"0.5157061",
"0.51404923",
"0.5133519",
"0.5117283",
"0.51151955",
"0.50807846",
"0.5068225",
"0.50633526"
]
| 0.67115897 | 1 |
Retrieves a lambda-y slice at an x coordinate specified by the cube's primary wavelength plus the given offset. | def _choose_x_slice(self, offset):
arr = None
axis = 0
length = self.data.shape[axis]
if isinstance(offset, int) and offset >= 0 and offset < length:
arr = self.data.take(offset, axis=axis)
if isinstance(offset, u.Quantity):
unit = self.axes_wcs.wcs.cunit[-1]
delta = self.axes_wcs.wcs.cdelt[-1] * unit
wloffset = offset.to(unit) / delta
wloffset = int(wloffset)
if wloffset >= 0 and wloffset < self.data.shape[axis]:
arr = self.data.take(wloffset, axis=axis)
return arr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr",
"def slice_to_lightcurve(self, wavelength, y_coord=None, x_coord=None):\n if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:\n raise cu.CubeError(1,\n 'Cannot create a lightcurve with no time axis')\n if self.axes_wcs.wcs.ctype[1] != 'WAVE':\n raise cu.CubeError(2, 'A spectral axis is needed in a lightcurve')\n if self.data.ndim == 3:\n data = self._choose_wavelength_slice(wavelength)\n if y_coord is not None:\n data = data[:, cu.pixelize(y_coord, self.axes_wcs, 1)]\n else:\n if y_coord is None and x_coord is None:\n raise cu.CubeError(4, \"At least one coordinate must be given\")\n if y_coord is None:\n y_coord = slice(None, None, None)\n else:\n y_coord = cu.pixelize(y_coord, self.axes_wcs, 2)\n if x_coord is None:\n x_coord = slice(None, None, None)\n else:\n x_coord = cu.pixelize(x_coord, self.axes_wcs, 3)\n item = (slice(None, None, None), wavelength, y_coord, x_coord)\n data = self.data[item]\n\n return LightCurve(data=data, meta=self.meta)",
"def plot_wavelength_slice(self, offset, **kwargs):\n cumul_cube_lengths = np.cumsum(np.array([c.shape[self.common_axis]\n for c in self.data]))\n sequence_index, cube_index = cu._convert_cube_like_index_to_sequence_indices(\n offset, cumul_cube_lengths)\n plot = self[sequence_index].plot_wavelength_slice(cube_index, **kwargs)\n return plot",
"def slice(\n sample, \n ei, psi_axis,\n hkl0, hkl_dir, x_axis, \n instrument, erange,\n out):\n from mcvine.workflow.sample import loadSampleYml\n sample = loadSampleYml(sample)\n code = \"from mcvine.workflow.DGS import %s as mod\" % instrument\n d = {}; exec(code, d); mod = d['mod']\n psi_angles = np.arange(*tuple(psi_axis))\n x_axis = np.arange(*tuple(x_axis))\n from matplotlib import pyplot as plt\n plt.figure()\n from ...singlextal import dynrange\n dynrange.plotDynRangeOfSlice(\n sample, psi_angles, ei, hkl0, hkl_dir, x_axis,\n mod.scattering_angle_constraints,\n Erange=erange)\n if out:\n plt.savefig(out)\n else:\n plt.show()\n return",
"def plot_x_slice(self, offset, **kwargs):\n cumul_cube_lengths = np.cumsum(np.array([c.shape[self.common_axis]\n for c in self.data]))\n sequence_index, cube_index = cu._convert_cube_like_index_to_sequence_indices(\n offset, cumul_cube_lengths)\n plot = self[sequence_index].plot_x_slice(cube_index, **kwargs)\n return plot",
"def offset(self, offset):\n return Line3d(self.p + offset * self.cross.normalized(), self.v)",
"def offset(self, offset):\n return Line(self.p + offset * self.cross_z.normalized(), self.v)",
"def relay_array_getitem(c, a, start, stop, strides):\n assert start.is_constant(tuple)\n assert stop.is_constant(tuple)\n assert strides.is_constant(tuple)\n return relay.op.transform.strided_slice(c.ref(a), start.value, stop.value,\n strides.value)",
"def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)",
"def Offset_Data(x, offset):\r\n\r\n import numpy as np;\r\n \r\n offset = np.ones(x.shape)*offset;\r\n x = x-offset;\r\n return x;",
"def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]",
"def getSlice(properties=None, **kw):",
"def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)",
"def create_slice(*, stop : Optional[int] = None, start : Optional[int] = None, step : Optional[int] = None) -> slice:\n return slice(start, stop, step)",
"def get1DSlice( self, slices: list ):\n assert(len(slices)==self._nDims-1)\n slices.append(slice(self._nGlobalCoords[self._layout.dims_order[-1]]))\n return self._f[tuple(slices)]",
"def get_slice(self, limit, offset):\r\n if limit == 0:\r\n return self.objects[offset:]\r\n\r\n return self.objects[offset:offset + limit]",
"def slice_during(self, e):\r\n\r\n if not isinstance(e, Epochs):\r\n raise ValueError('e has to be of Epochs type')\r\n\r\n if e.data.ndim > 0:\r\n raise NotImplementedError('e has to be a scalar Epoch')\r\n\r\n if self.ndim != 1:\r\n e_s = 'slicing only implemented for 1-d TimeArrays'\r\n return NotImplementedError(e_s)\r\n i_start = self.index_at(e.start)\r\n i_stop = self.index_at(e.stop)\r\n if e.start > self[i_start]: # make sure self[i_start] is in epoch e\r\n i_start += 1\r\n if e.stop > self[i_stop]: # make sure to include self[i_stop]\r\n i_stop += 1\r\n\r\n return slice(i_start, i_stop)",
"def slice_layer(start, end, step=None, axis=1):\n if axis < 0:\n raise ValueError(\"'slice_layer' can only work on a specified axis > 0\")\n\n def slice_func(x):\n slices = [slice(None)] * axis\n slices.append(slice(start, end, step))\n return x[tuple(slices)]\n\n return Lambda(slice_func)",
"def cut_dyadic(x):\n n = x.shape[0]\n j = math.floor(math.log2(n))\n m = 2**j\n return lax.dynamic_slice(x, (0,), (m,))",
"def get_slice(self, n):\n if n == 0:\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)\n raise IndexError(f\"{n} is invalid for a 1 dimension Slice \")",
"def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart",
"def get_row_slice(xlrd_sheet, start_row):\r\n num_rows = xlrd_sheet.nrows\r\n\r\n for _ in range(num_rows):\r\n # print start_row\r\n yield xlrd_sheet.row_slice(rowx=start_row, start_colx=0, end_colx=3)\r\n start_row += 1",
"def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X",
"def _shifted(self, aslice):\n return slice(\n self._start if aslice.start is None else self._clamp(aslice.start),\n self._stop if aslice.stop is None else self._clamp(aslice.stop),\n aslice.step)",
"def center_slice(x_data, cutoff):\n if x_data.ndim <= 1:\n raise RuntimeError(\"Data should be greater than 1D\")\n\n make_slice = sequence(\n lambda x: x_data.shape[1:][x] // 2, lambda x: slice(x - cutoff, x + cutoff + 1)\n )\n\n return pipe(\n range(len(x_data.shape) - 1),\n map_(make_slice),\n tuple,\n lambda x: (slice(len(x_data)),) + x,\n lambda x: x_data[x],\n )",
"def plot_offset_between_cubes(cube, x, y, wl, medfilt_window=151, show_plot=False):\n\n smooth_x = signal.medfilt(x, medfilt_window)\n smooth_y = signal.medfilt(y, medfilt_window)\n\n print(np.nanmean(smooth_x))\n print(np.nanmean(smooth_y))\n\n fig, ax = plt.subplots(figsize=(10, 5))\n wl = cube.RSS.wavelength\n ax.plot(wl, x, \"k.\", alpha=0.1)\n ax.plot(wl, y, \"r.\", alpha=0.1)\n ax.plot(wl, smooth_x, \"k-\")\n ax.plot(wl, smooth_y, \"r-\")\n # plt.plot(wl, x_max-np.nanmedian(x_max), 'g-')\n # plt.plot(wl, y_max-np.nanmedian(y_max), 'y-')\n ax.set_ylim(-1.6, 1.6)\n if show_plot:\n plt.show()\n return fig",
"def slice_lane(lane, label, window_size, recovery):\n sub_lanes = []\n sub_labels = []\n\n magnifier = ImageMagnifier(lane, label, window_size, recovery)\n\n for (sub_lane, sub_label) in magnifier:\n sub_lanes.append(sub_lane.astype(dtype='float32'))\n sub_labels.append(sub_label)\n\n return np.array(sub_lanes), np.array(sub_labels).astype(dtype='float32'), LaneIterator(len(magnifier), window_size, recovery, lane.shape[1])",
"def slice_during(self, e):\r\n\r\n if not isinstance(e, Epochs):\r\n raise ValueError('e has to be of Epochs type')\r\n\r\n if e.data.ndim > 0:\r\n raise NotImplementedError('e has to be a scalar Epoch')\r\n\r\n if self.ndim != 1:\r\n e_s = 'slicing only implemented for 1-d TimeArrays'\r\n return NotImplementedError(e_s)\r\n\r\n # These two should be called with modes, such that they catch the right\r\n # slice\r\n start = self.index_at(e.start, mode='after')\r\n stop = self.index_at(e.stop, mode='before')\r\n\r\n # If *either* the start or stop index object comes back as the empty\r\n # array, then it means the condition is not satisfied, we return the\r\n # slice that does [:0], i.e., always slices to nothing.\r\n if start.shape == (0,) or stop.shape == (0,):\r\n return slice(0)\r\n\r\n # Now, we know the start/stop are not empty arrays, but they can be\r\n # either scalars or arrays.\r\n i_start = start if np.isscalar(start) else start.max()\r\n i_stop = stop if np.isscalar(stop) else stop.min()\r\n\r\n if e.start > self[i_start]: # make sure self[i_start] is in epoch e\r\n i_start += 1\r\n if e.stop > self[i_stop]: # make sure to include self[i_stop]\r\n i_stop += 1\r\n\r\n return slice(i_start, i_stop)",
"def slice(list, point):\n index = list.index(point)\n slices = []\n \n slices.append(list[:index])\n slices.append(list[index + 1:])\n \n return slices",
"def carve_slice(\n self, x_index=0, width=config()[\"panel\"][\"width\"],\n ):\n piece = []\n for row in self.grid:\n piece.append(row[x_index : x_index + width])\n\n return piece"
]
| [
"0.63117456",
"0.5793032",
"0.5629141",
"0.5089525",
"0.5078301",
"0.506176",
"0.50505126",
"0.5043352",
"0.49999565",
"0.49748117",
"0.4926123",
"0.49144813",
"0.49112678",
"0.4907331",
"0.48990658",
"0.48967353",
"0.4890172",
"0.4886623",
"0.48143712",
"0.4807352",
"0.4763628",
"0.4715056",
"0.47144327",
"0.4713819",
"0.47130302",
"0.47090024",
"0.47084352",
"0.46993598",
"0.46963003",
"0.46831876"
]
| 0.61754596 | 1 |
Converts a given frequency chunk to a SunPy Map. Extra parameters are passed on to Map. | def slice_to_map(self, chunk, snd_dim=None, *args, **kwargs):
if self.axes_wcs.wcs.ctype[1] == 'WAVE' and self.data.ndim == 3:
error = "Cannot construct a map with only one spatial dimension"
raise cu.CubeError(3, error)
if isinstance(chunk, tuple):
item = slice(cu.pixelize(chunk[0], self.axes_wcs, -1),
cu.pixelize(chunk[1], self.axes_wcs, -1), None)
maparray = self.data[item].sum(0)
else:
maparray = self.data[cu.pixelize(chunk, self.axes_wcs, -1)]
if self.data.ndim == 4:
if snd_dim is None:
error = "snd_dim must be given when slicing hypercubes"
raise cu.CubeError(4, error)
if isinstance(snd_dim, tuple):
item = slice(cu.pixelize(snd_dim[0], self.axes_wcs, -1),
cu.pixelize(snd_dim[1], self.axes_wcs, -1), None)
maparray = maparray[item].sum(0)
else:
maparray = maparray[cu.pixelize(snd_dim, self.axes_wcs, -1)]
mapheader = MetaDict(self.meta)
gmap = GenericMap(data=maparray, header=mapheader, *args, **kwargs)
return gmap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks",
"def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = 
self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map",
"def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True",
"def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_SERIES')\n mim.series_exponent = 0\n mim.series_start = self.start\n mim.series_step = self.step\n mim.number_of_series_points = self.size\n mim.series_unit = self.unit\n return mim",
"def mappings(input_report, **kwargs):\n ben = BoomerEngine()\n ben.load(input_report, prefix_map=global_prefix_map)\n writer = StreamingSssomWriter()\n for m in ben.mappings(**kwargs):\n writer.emit(m)\n writer.finish()",
"def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))",
"def map():",
"def _get_freq_map(min_freq, max_freq, num_freq, dtype=torch.float32):\n if num_freq > 1:\n step = float(max_freq - min_freq) / (num_freq - 1)\n map = torch.arange(start=min_freq,\n end=max_freq + step,\n step=step,\n dtype=dtype)\n return torch.reshape(map, (1, 1, -1, 1))\n elif num_freq == 1:\n return torch.tensor([float(max_freq + min_freq) / 2]).view([1, 1, -1, 1])\n else:\n raise ValueError('num_freq should be positive but we got: {}'.format(num_freq))",
"def make_map(self, sampling=None, size=None):\n if sampling is None and self.sampling is None:\n self.sampling = self.minimum_sampling()\n elif sampling is not None:\n self.sampling = sampling\n \n if size is None and self.size is None:\n self.size = self.minimum_size()\n elif size is not None:\n self.size = size\n\n # Build the on-sky source distribution\n self.seeing.make_map(sampling=self.sampling, size=self.size)\n self.x = self.seeing.x\n self.X = self.seeing.X\n self.y = self.seeing.y\n self.Y = self.seeing.Y\n try:\n # Construct the intrinsic map of the source\n self.intrinsic.make_map(sampling=self.sampling, size=self.size)\n # Convolve with the seeing distribution, conserving the\n # integral of the intrinsic source\n self.data = signal.fftconvolve(self.intrinsic.data,\n self.seeing.data * numpy.square(self.sampling),\n mode='same')\n except AttributeError:\n # Renormalize the unity-integral seeing kernal for to\n # represent a point source\n self.data = self.intrinsic*self.seeing.data\n\n # Get the integral\n try:\n # After convolving with the seeing kernel, the total\n # integral should be the same, up to some tolerance\n self.integral = self.intrinsic.integral\n tolerance = 1e-3\n diff = numpy.absolute(self.integral - numpy.square(self.sampling)*numpy.sum(self.data))\n if diff > tolerance:\n warnings.warn('Map and analytic integrals are discrepant by {0} ({1} %)'.format(\n diff, 100*diff/self.integral))\n except AttributeError:\n self.integral = numpy.square(self.sampling) * numpy.sum(self.data)\n\n # Prep for interpolation\n self.interp = interpolate.interp2d(self.x, self.y, self.data, bounds_error=True)",
"def kolmomap(xx,yy,amp,wavelength,angle,phase):\n sinemap=sine2d(xx,yy,amp[0],wavelength[0],angle[0]/180.*pi,phase[0])*0.\n for counter in range(len(amp)):\n sinemap=sinemap+sine2d(xx,yy,amp[counter],wavelength[counter],angle[counter]/180.*pi,phase[counter])\n return sinemap",
"def spotmap(week, bam, pt):\n print('Mapping MCAP data to Metrics... ',end=''), \n bam['Spotfire Data'] = bam.index.to_series().map(pt.fillna(0)['len',week])\n print('Done')\n return bam",
"def mapping(nside, angs):\n\n\tnpix = hp.nside2npix(nside)\n\tmaph = np.zeros(npix)\n\n\tpix = hp.ang2pix(nside, angs[:, 1], angs[:, 0])\n\tvals, times = np.unique(pix, return_counts=True)\n\n\tmaph[vals] = times\n\tmaph *= float(npix)/len(angs)\n\n\treturn maph",
"def fft_mtz_to_map(mtz_file, map_file, cols):\n\n # Initialise\n writer = CommandManager('fft')\n # Set Program Arguments\n writer.add_command_line_arguments('hklin',mtz_file,'mapout',map_file)\n # Set Program Input\n writer.add_standard_input(['LABIN F1={!s} PHI={!s}'.format(cols['F'],cols['P']),'END'])\n # RUN!\n writer.run()\n # Check Output\n if writer.process.returncode != 0:\n print('\\nOUT\\n\\n'+writer.out)\n print('\\nERR\\n\\n'+writer.err)\n raise RuntimeError('fft failed to generate map from {!s}'.format(mtz_file))\n\n return writer",
"def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'",
"def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map",
"def disp_map(disp):\n map = np.array([\n [0, 0, 0, 114],\n [0, 0, 1, 185],\n [1, 0, 0, 114],\n [1, 0, 1, 174],\n [0, 1, 0, 114],\n [0, 1, 1, 185],\n [1, 1, 0, 114],\n [1, 1, 1, 0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0] - 1, map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] - 1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0] - 1] / cbins[cbins.shape[0] - 1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6, 1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size + 1, 1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s, 0:3] * np.tile(1 - disp, (1, 3)) + map[s + 1, 0:3] * np.tile(disp, (1, 3))\n\n return disp",
"def disp_map(disp):\n map = np.array([\n [0,0,0,114],\n [0,0,1,185],\n [1,0,0,114],\n [1,0,1,174],\n [0,1,0,114],\n [0,1,1,185],\n [1,1,0,114],\n [1,1,1,0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0]-1,map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] -1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0]-1] / cbins[cbins.shape[0] -1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6,1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size+1,1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s,0:3] * np.tile(1 - disp,(1,3)) + map[s + 1,0:3] * np.tile(disp,(1,3))\n\n return disp",
"def addChunk(self, direction):\n pass\n\n ## get size of actual map\n ## create array of fitting size\n ## stack created array to map",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CHEL').get('abstractTypes')\n exolinks = globalMap.get('CHEL').get('exolinks')\n\n # DataType HalfLifeType\n currentMap = {}\n abstractTypes['HalfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'] = currentMap\n loadMaps['CHEL.HalfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.HalfLifeType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class ChemElement\n currentMap = {}\n abstractTypes['ChemElement'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'] = currentMap\n loadMaps['CHEL.ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElements'\n currentMap['objkey'] = 'symbol'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElement.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElement.atomNumber\n currentMap = {}\n contentMap['atomNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'] = currentMap\n loadMaps['CHEL.ChemElement.atomNumber'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'\n currentMap['name'] = 'atomNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute ChemElement.atomicRadius\n currentMap = {}\n contentMap['atomicRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'] = currentMap\n loadMaps['CHEL.ChemElement.atomicRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomicRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'\n currentMap['name'] = 'atomicRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.covalentRadius\n currentMap = {}\n contentMap['covalentRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'] = currentMap\n loadMaps['CHEL.ChemElement.covalentRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.covalentRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'\n currentMap['name'] = 'covalentRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'] = currentMap\n loadMaps['CHEL.ChemElement.mass'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'\n 
currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'] = currentMap\n loadMaps['CHEL.ChemElement.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Attribute ChemElement.symbol\n currentMap = {}\n contentMap['symbol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'] = currentMap\n loadMaps['CHEL.ChemElement.symbol'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.symbol'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'\n currentMap['name'] = 'symbol'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Role ChemElement.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElement.isotopes\n currentMap = {}\n contentMap['isotopes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'] = currentMap\n loadMaps['CHEL.ChemElement.isotopes'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.isotopes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'\n currentMap['name'] = 'isotopes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElement\n\n currentMap = abstractTypes.get('ChemElement')\n aList = ['atomNumber', 'atomicRadius', 'covalentRadius', 'mass', 'name', 'symbol']\n currentMap['headerAttrs'] = aList\n aList = ['isotopes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopes']\n currentMap['children'] = aList\n\n # Class ChemElementStore\n currentMap = {}\n abstractTypes['ChemElementStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'] = currentMap\n loadMaps['CHEL.ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElementStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElementStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElementStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute ChemElementStore.isModifiable\n contentMap['isModifiable'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'] = currentMap\n loadMaps['CHEL.ChemElementStore.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ChemElementStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElementStore.chemElements\n currentMap = {}\n contentMap['chemElements'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'] = currentMap\n loadMaps['CHEL.ChemElementStore.chemElements'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.chemElements'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'\n currentMap['name'] = 'chemElements'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElementStore\n\n currentMap = abstractTypes.get('ChemElementStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['chemElements', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemElements']\n currentMap['children'] = aList\n\n # Class Isotope\n currentMap = {}\n abstractTypes['Isotope'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'] = currentMap\n loadMaps['CHEL.Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopes'\n currentMap['objkey'] = 'massNumber'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotope.abundance\n currentMap = {}\n contentMap['abundance'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'] = currentMap\n loadMaps['CHEL.Isotope.abundance'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.abundance'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'\n currentMap['name'] = 'abundance'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00058')\n\n # Attribute Isotope.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotope.gyroMagneticRatio\n currentMap = {}\n contentMap['gyroMagneticRatio'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'] = currentMap\n 
loadMaps['CHEL.Isotope.gyroMagneticRatio'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.gyroMagneticRatio'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'\n currentMap['name'] = 'gyroMagneticRatio'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.halfLife\n currentMap = {}\n contentMap['halfLife'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'] = currentMap\n loadMaps['CHEL.Isotope.halfLife'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLife'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'\n currentMap['name'] = 'halfLife'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeError\n currentMap = {}\n contentMap['halfLifeError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeError'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'\n currentMap['name'] = 'halfLifeError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeType\n currentMap = {}\n contentMap['halfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'\n currentMap['name'] = 'halfLifeType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'unknown'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002')\n\n # Attribute Isotope.magneticMoment\n currentMap = {}\n contentMap['magneticMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'] = currentMap\n loadMaps['CHEL.Isotope.magneticMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.magneticMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'\n currentMap['name'] = 'magneticMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'] = currentMap\n loadMaps['CHEL.Isotope.mass'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.massNumber\n currentMap = {}\n contentMap['massNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'] = currentMap\n loadMaps['CHEL.Isotope.massNumber'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.massNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'\n currentMap['name'] = 
'massNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotope.quadrupoleMoment\n currentMap = {}\n contentMap['quadrupoleMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'] = currentMap\n loadMaps['CHEL.Isotope.quadrupoleMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.quadrupoleMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'\n currentMap['name'] = 'quadrupoleMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.receptivity\n currentMap = {}\n contentMap['receptivity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'] = currentMap\n loadMaps['CHEL.Isotope.receptivity'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.receptivity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'\n currentMap['name'] = 'receptivity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.spin\n currentMap = {}\n contentMap['spin'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'] = currentMap\n loadMaps['CHEL.Isotope.spin'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.spin'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'\n currentMap['name'] = 'spin'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Isotope.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Isotope\n\n currentMap = abstractTypes.get('Isotope')\n aList = ['abundance', 'gyroMagneticRatio', 'halfLife', 'halfLifeError', 'halfLifeType', 'magneticMoment', 'mass', 'massNumber', 'quadrupoleMoment', 'receptivity', 'spin']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to ChemElement\n currentMap = {}\n exolinks['ChemElement'] = currentMap\n loadMaps['CHEL.exo-ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElement'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['name'] = 'ChemElement'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n\n # Out-of-package link to ChemElementStore\n currentMap = {}\n exolinks['ChemElementStore'] = currentMap\n loadMaps['CHEL.exo-ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElementStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['name'] = 'ChemElementStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to Isotope\n currentMap 
= {}\n exolinks['Isotope'] = currentMap\n loadMaps['CHEL.exo-Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.exo-Isotope'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['name'] = 'Isotope'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))",
"def apply(smap, parameters=None):\r\n if parameters is None:\r\n parameters = {}\r\n\r\n for t in smap:\r\n t.properties[STOCHASTIC_DISTRIBUTION] = smap[t]",
"def create_map(self, data_file):\n mapping = []\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n path_to_file = os.path.join(root, DATA_PATH, data_file)\n with open(path_to_file, 'r') as file:\n lines = file.readlines()\n list_array = [x.strip('\\n') for x in lines]\n for line in list_array:\n mapping.append(list(line))\n\n self.map = mapping",
"def mapper(self, _, doc):\n ret = doc.split('\\t')\n key = ret[2]\n values = {}\n try:\n values[\"ts_ini\"] = datetime.utcfromtimestamp(float(ret[0]))\n except:\n values[\"ts_ini\"] = None\n try:\n values[\"ts_end\"] = datetime.utcfromtimestamp(float(ret[1]))\n except:\n values[\"ts_end\"] = None\n try:\n values[\"value\"] = ret[3]\n except:\n values[\"value\"] = None\n try:\n values[\"energytype\"] = ret[4]\n except:\n values[\"energytype\"] = None\n try:\n values[\"source\"] = ret[5]\n except:\n values[\"source\"] = None\n\n yield key, values",
"def build_map(chunk_start, result, total_chunks, start_id, end_id):\n size = len(chunk_start)\n for i in prange(size):\n beg = chunk_start[i]\n end = chunk_start[i + 1] if i < size - 1 else total_chunks\n if start_id < end and beg < end_id: # [beg, end) intersect [start_id, end_id)\n result[max(beg - start_id, 0) : (end - start_id), 0] = beg\n result[max(beg - start_id, 0) : (end - start_id), 1] = end",
"def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"",
"def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A",
"def convert_to_mm_per_day(maps, units='kg m-2 s-1'):\n if units == 'kg m-2 s-1':\n return np.multiply(maps, 86400)\n else:\n raise ValueError('Conversion for units=%s not supported' % units)",
"def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)",
"def school_sitemaps(chunk=2000):\n school_sitemap = {}\n schools = School.objects.all()\n paginated_schools = Paginator(schools, chunk)\n for this_page in paginated_schools.page_range:\n school_dict = {\n 'queryset': paginated_schools.page(this_page).object_list,\n 'date_field': 'updated_on',\n }\n school_sitemap['schools_%s' % this_page] = GenericSitemap(school_dict, priority=0.6, changefreq='monthly')\n return school_sitemap",
"def build_article_map(f='./wikispeedia_paths-and-graph/articles.tsv'):\n out_dict = {}\n count = 0\n with open(f, 'r') as r:\n for _ in xrange(12):\n next(r)\n for line in r:\n out_dict[line.strip('\\n')] = count\n count += 1\n return out_dict",
"def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm"
]
| [
"0.57270765",
"0.5564925",
"0.5475267",
"0.54156667",
"0.54104096",
"0.5340992",
"0.52815837",
"0.51907945",
"0.51874447",
"0.51529014",
"0.5140967",
"0.51343143",
"0.50851405",
"0.5076608",
"0.5042034",
"0.5038399",
"0.5007617",
"0.5004075",
"0.4992073",
"0.49909553",
"0.49845466",
"0.49730906",
"0.49619493",
"0.49615136",
"0.49530712",
"0.49528977",
"0.49385336",
"0.49385124",
"0.49293026",
"0.4924914"
]
| 0.6020252 | 0 |
For a time-lambda-y cube, returns a lightcurve with curves at the specified wavelength and given y-coordinate. If no y is given, all of them will be used (meaning the lightcurve object could contain more than one time curve). | def slice_to_lightcurve(self, wavelength, y_coord=None, x_coord=None):
if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:
raise cu.CubeError(1,
'Cannot create a lightcurve with no time axis')
if self.axes_wcs.wcs.ctype[1] != 'WAVE':
raise cu.CubeError(2, 'A spectral axis is needed in a lightcurve')
if self.data.ndim == 3:
data = self._choose_wavelength_slice(wavelength)
if y_coord is not None:
data = data[:, cu.pixelize(y_coord, self.axes_wcs, 1)]
else:
if y_coord is None and x_coord is None:
raise cu.CubeError(4, "At least one coordinate must be given")
if y_coord is None:
y_coord = slice(None, None, None)
else:
y_coord = cu.pixelize(y_coord, self.axes_wcs, 2)
if x_coord is None:
x_coord = slice(None, None, None)
else:
x_coord = cu.pixelize(x_coord, self.axes_wcs, 3)
item = (slice(None, None, None), wavelength, y_coord, x_coord)
data = self.data[item]
return LightCurve(data=data, meta=self.meta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lightCurve(self, time, filters):\n\n lcMags = np.zeros(time.size, dtype=float)\n\n rise = np.where(time <= self.peakTime)\n lcMags[rise] += self.riseSlope*time[rise]-self.riseSlope*self.peakTime\n decline = np.where(time > self.peakTime)\n lcMags[decline] += self.declineSlope*(time[decline]-self.peakTime)\n\n for key in self.peaks.keys():\n fMatch = np.where(filters == key)\n lcMags[fMatch] += self.peaks[key]\n\n return lcMags",
"def lCurve(self): \n\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. # for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n lcplt.figname = os.path.join(self.workpath, 'LightCurve.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.hline = [None, self.tsmin]\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [(min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale, min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [(max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n if len(flux) == 0:\n lcplt.mksize = [2, 2]\n lcplt.ymode = ['linear', 'linear']\n lcplt.color = ['gray', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [True, False]\n lcplt.multiplot(x = [ timeMJD[undet], timeMJD ],\n y = [ upperl/scale, ts ],\n xerr = [ lcTab['mjderr'][undet], lcTab['mjderr']],\n yerr = [ upperl/scale*0.1, None])\n else:\n lcplt.mksize = [2, 2, 2]\n lcplt.ymode = ['linear', 'linear', 'linear']\n lcplt.color = ['gray', 'black', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [[True, False], False]\n lcplt.multiplot(x = [ [timeMJD[undet], timeMJD[detect]], timeMJD ],\n y = [ [upperl/scale, flux/scale], ts ],\n xerr = [ [lcTab['mjderr'][undet], lcTab['mjderr'][detect]], lcTab['mjderr']],\n yerr = [ [upperl/scale*0.1, fluxerr/scale], None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return",
"def show_lightcurve(self):\n\n time_array = self.exp_start_times\n\n fig = plt.figure()\n\n if self.transmission_spectroscopy:\n lc_model = self.generate_lightcurves(time_array,\n self.planet.calcTransitDepth())\n plt.ylabel(\"Transit Depth\")\n else:\n lc_model = np.ones_like(time_array)\n plt.ylabel(\"Unit Flux\")\n\n if self._visit_trend:\n trend_model = self._visit_trend.scale_factors\n # have to convert weird model format to flat array\n lc_model = trend_model * lc_model.T[0]\n\n plt.scatter(time_array, lc_model)\n plt.xlabel(\"Time (JD)\")\n plt.title(\"Normalised White Time Series of observation\")\n\n return time_array, lc_model",
"def interpolate_lightcurve(light_curve, samples_per_frame_time, frame_time):\n time_units = light_curve['times'].unit\n flux_units = light_curve['fluxes'].unit\n divisor = samples_per_frame_time - 1.\n points = np.arange(light_curve['times'][0].value, light_curve['times'][-1].value, frame_time/divisor)\n light_curve[\"fluxes\"] = np.interp(points, light_curve['times'].value, light_curve['fluxes'].value) * flux_units\n light_curve[\"times\"] = points * time_units\n return light_curve",
"def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):\n # Expand to shape of time axis\n flux = np.tile(psf, (len(time), 1, 1))\n\n # If there is a transiting planet...\n if ld_coeffs is not None and rp is not None and str(type(tmodel)) == \"<class 'batman.transitmodel.TransitModel'>\":\n\n # Set the wavelength dependent orbital parameters\n tmodel.u = ld_coeffs\n tmodel.rp = rp\n\n # Generate the light curve for this pixel\n lightcurve = tmodel.light_curve(tmodel)\n\n # Scale the flux with the lightcurve\n flux *= lightcurve[:, None, None]\n\n return flux",
"def generate_lightcurves(self, time_array, depth=False):\n\n # TODO (ryan) quick check if out of transit, in that case ones!\n\n # TODO (ryan) should this be in generate exposure?\n\n planet = self.planet\n star = self.planet.star\n\n P = float(planet.P.rescale(pq.day))\n a = float((planet.a / star.R).simplified)\n rp = float((planet.R / star.R).simplified)\n i = float(planet.i.rescale(pq.deg))\n e = planet.e\n W = float(planet.periastron)\n transittime = float(planet.transittime)\n\n if np.isnan(W):\n W = 0\n\n time_array = time_array.to(u.day).value\n # model for each resolution element.\n\n if depth:\n planet_spectrum = np.array(\n [depth]) # an array as we want to perform ndim later.\n else:\n planet_spectrum = self.planet_spectrum\n\n models = np.zeros((len(time_array), len(planet_spectrum)))\n\n planet_spectrum = np.sqrt(\n planet_spectrum) # pylc wants Rp/Rs not transit depth\n\n time_array = tools.jd_to_hjd(time_array,\n planet) # pylc wants hjd not jd\n\n logger.debug(\n \"Generating lightcurves with P={}, a={}, i={}, e={}, W={}, T14={},\"\n \" mean_depth={}\".format(\n P, a, i, e, W, transittime, np.mean(planet_spectrum)\n ))\n\n for j, spec_elem in enumerate(planet_spectrum):\n models[:, j] = pylc.transit('claret', self.ldcoeffs, spec_elem, P, a, e, i,\n W, transittime, time_array) - \\\n (\n 1. - pylc.eclipse(spec_elem ** 2, rp, P, a, e,\n i, W,\n transittime, time_array))\n\n return models",
"def lightcurve(t, lat, lon, max_l, coefficients,\n initial_star_lon, initial_obs_lon, omega_orb, omega_rot):\n albedo = albedo_map(lat, lon, max_l, coefficients)\n\n star_lat, star_lon, obs_lat, obs_lon = geometry(t, initial_star_lon,\n initial_obs_lon, omega_orb,\n omega_rot)\n\n K = diffuse_kernel(lat, lon, star_lat, star_lon, obs_lat, obs_lon)\n\n flux = np.sum(albedo[..., np.newaxis] * K * np.sin(lat)[..., np.newaxis],\n axis=(0, 1))\n\n return flux",
"def light_curve(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 4*8 # Detector mask\n + 4 # spare\n + 12 # Pixel mask\n + 1 # spare\n + 1 # Comp Schema light curve S\n + 3 # Comp Schema light curve K\n + 3 # Comp Schema light curve M\n + 1 # Comp Schema trigger S\n + 3 # Comp Schema trigger K\n + 3 # Comp Schema trigger M\n + 1 # Energy bin mask upper boundary\n + 4*8 # Energy bin mask lower boundray\n + 1*8 # Number of energies\n + num_energies*2*8 # Number data points\n + 2*8 # Number of data points\n + 2*8 # Number of data point\n )\n\n variable = (\n + num_energies*num_samples*8 # Compressed light curves\n + num_samples*8 # Compressed triggers\n + num_samples*8 # RCR\n )\n\n return fixed_header, variable",
"def plot_lc(self, ax=None):\n import matplotlib.pyplot as plt\n\n if not any(self.flux):\n raise ValueError('Plotting the light curve is only possible when the '\n 'Object LightCurve is instantiated with time and flux')\n ax = ax or plt.gca()\n ax.plot(self.time, self.flux, 'k.-', label='Obs.', zorder=0)\n if any(self.model):\n ax.plot(self.time, self.model, 'r-', label='Model', zorder=2)\n ax.scatter(self.time, self.model, s=50, facecolors='none', edgecolors='r', zorder=3)\n ax.set_xlabel('Time [seconds]', fontsize=20)\n ax.set_ylabel('Relative Flux', fontsize=20)\n ax.legend()",
"def check_lightcurve_time(light_curve, exposure_time, frame_time):\n logger = logging.getLogger('mirage.seed_image.tso.check_lightcurve_time')\n\n times = copy.deepcopy(light_curve[\"times\"].value)\n fluxes = copy.deepcopy(light_curve[\"fluxes\"].value)\n time_units = light_curve[\"times\"].unit\n flux_units = light_curve[\"fluxes\"].unit\n adjusted = False\n\n # Remove elements where time < 0.\n if np.min(times) < 0.:\n positive_times = times >= 0.\n times = times[positive_times]\n fluxes = fluxes[positive_times]\n adjusted = True\n\n # If the times begin at values significantly > 0,\n # then add entries to bring the start back to time = 0\n if np.min(times) > 0.:\n logger.info((\"Lightcurve time values do not start at zero. Prepending an entry with time=0 \"\n \"and flux = 1.\"))\n times = np.insert(times, 0, 0.)\n fluxes = np.insert(fluxes, 0, 1.)\n adjusted = True\n\n # If the ending time is less than the exposure's total\n # observation time, then add entries with flux=1\n if np.max(times) < exposure_time:\n logger.info((\"Lightcurve time values extend only to {} seconds. This is not long enough \"\n \"to cover the entire exposure time of {} seconds. Extending to cover the full \"\n \"exposure time with flux = 1.\".format(np.max(times), exposure_time)))\n times = np.append(times, exposure_time + 5 * frame_time)\n fluxes = np.append(fluxes, 1.)\n adjusted = True\n\n if adjusted:\n light_curve[\"times\"] = times * time_units\n light_curve[\"fluxes\"] = fluxes * flux_units\n\n return light_curve",
"def lightcurve(self):\n return NGCLightCurve(self['corotid'])",
"def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):\n # Set pyplot style to be consisten within the program\n plt.style.use('seaborn-whitegrid')\n # Frequency = 1 / Period\n _freq = 1 / _period\n\n # Create single dataset from all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n # Iterate through each band and plot to screen\n i = 0\n while i < 5:\n # Array to set colours for each band\n _colours = ['-b', '-g', '-r', '-c', '-m']\n # Array to set strings for graph legend\n _legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']\n # Determine the line of best fit for each band\n _xfit, _lobf = calclobf(_bands[i], _period)\n # Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit\n plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])\n i += 1\n\n # Set x-axis limit to a single period\n plt.xlim(0, 1)\n # Set graph and axis titles\n plt.xlabel(\"Phase\")\n plt.ylabel(\"Magnitude\")\n plt.title(\"Folded light curve\")\n # Show the legend\n plt.legend()\n # Invert y-axis as convention\n plt.gca().invert_yaxis()\n # Save to current folder\n plt.savefig('curve.png')\n # Display to screen\n plt.show()",
"def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]",
"def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave",
"def make_lightcurve(t0, r, i, p, width, u_type, u_param, t):\n # Init batman model\n params = batman.TransitParams()\n params.rp = r\n params.inc = i\n params.w = 0 # longitude of periastron (degenerate with width)\n params.ecc = 0 # eccentricity (0 for circular orbits)\n params.per = p # orbital period\n params.t0 = t0\n params.a = (width * p ** 2) ** (1 / 3) # semi-major axis (stellar radii)\n params.limb_dark = u_type\n params.u = u_param\n model = batman.TransitModel(params, t)\n \n # Generate curve\n flux = model.light_curve(params) # compute light curve\n return flux",
"def getldcoeffs(Teff, logg, z, Tefferr, loggerr, zerr, law, channel, quiet = False):\n\n if not quiet: print \"\\nInterpolating {} limb darkening coefficients for {}...\".format(law,channel)\n # Paths where the tables are stored\n ch1path = \"{}/PhD/code/LD3.6Spitzer.txt\".format(os.getenv('HOME'))\n ch2path = \"{}/PhD/code/LD4.5Spitzer.txt\".format(os.getenv('HOME'))\n\n # Read in the required table\n if channel == 'ch1':\n table = np.genfromtxt(ch1path, skip_header=13, dtype=float, encoding = None)\n elif channel == 'ch2':\n table = np.genfromtxt(ch2path, skip_header=13, dtype=float, encoding = None)\n\n # 3D array of discrete values of teff, logg and z\n points = np.array([table.T[0], table.T[1], table.T[2]]).T\n\n if law == \"linear\": index = [0]\n elif law == \"quadratic\": index = [1,2]\n elif law == \"nonlinear\": index = [6,7,8,9]\n else: pass\n\n coeffs = np.zeros(len(index))\n coeffs_err = np.zeros(len(index))\n\n for i in range(len(index)):\n # All possible values of desired limb darkening coefficient (indexed)\n values = table.T[3+index[i]]\n # 3D Interpolates\n interp = LinearNDInterpolator(points,values)\n coeffs[i] = interp.__call__([Teff,logg,z])\n\n # Estimate the error on the interpolated result based on errors Teff,logg,z\n coeffsTU = interp.__call__(np.array([Teff+Tefferr,logg,z]))\n coeffsTL = interp.__call__(np.array([Teff-Tefferr,logg,z]))\n coeffsgU = interp.__call__(np.array([Teff,logg+loggerr,z]))\n coeffsgL = interp.__call__(np.array([Teff,logg-loggerr,z]))\n coeffszU = interp.__call__(np.array([Teff,logg,z+zerr]))\n coeffszL = interp.__call__(np.array([Teff,logg,z-zerr]))\n\n coeffs_err[i] = np.sqrt( ((coeffsTU - coeffsTL)/2.)**2 + ((coeffsgU - coeffsgL)/2.)**2 + ((coeffszU - coeffszL)/2.)**2 )\n\n if not quiet: print \"\\t Coeff(s): {}\".format(coeffs)\n if not quiet: print \"\\t Coeff Err(s): {}\".format(coeffs_err)\n\n return coeffs.tolist(), coeffs_err.tolist()",
"def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names",
"def ReadLightCurve(KeplerID):\n \n if inSource(KeplerID):\n db = MySQLdb.connect(host='tddb.astro.washington.edu', user='tddb', passwd='tddb', db='Kepler')\n cursor = db.cursor()\n foo = 'select * from source where (KEPLERID = %s)' % (KeplerID)\n cursor.execute(foo)\n results = cursor.fetchall()\n \n # reading time, corrected flux and flux errors\n time = num.ma.array([x[2] for x in results])\n corflux = num.ma.array([x[7] for x in results])\n corerr = num.ma.array([x[8] for x in results])\n \n idx = num.where((corflux>0)&(corerr>0))\n \n time = time[idx]\n corflux = corflux[idx]\n corerr = corerr[idx]\n \n return {'kid':KeplerID,'x':time,'y':corflux,'yerr':corerr}\n else:\n print 'Kepler ID %s not found in Kepler.source' % (KeplerID)\n return",
"def mel_spectrogram(self, y):\n # assert(torch.min(y.data) >= -1)\n # assert(torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n # magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return mel_output",
"def lcfig( z=7 ):\n\n for modelname in ['z15G','z25G','z40G'] :\n\n # initialize a supernova model :\n snmodel = sncosmo.Model(source=modelname)\n\n # Fix the redshift for this instantiation of the model\n # (NOTE: this does not apply any cosmological dimming. It only\n # shifts the wavelengths)\n snmodel.set( z = z )\n\n # generate the H band light curve\n tobs = np.arange( 0, 1000, 10 )\n M160 = snmodel.bandmag( 'f160w', 'ab', tobs ) # Absolute Magnitude\n m160 = M160 + cosmo.distmod( z ).value # apparent magnitude\n\n pl.plot( tobs, m160 )\n ax = pl.gca()\n ax.invert_yaxis()\n ax.set_xlabel('Time (observer-frame days)')\n ax.set_ylabel('Apparent Magnitude in F160W')\n ax.set_xlim( 0, 1000 )\n ax.set_ylim( 36, 28 )\n\n pl.draw()",
"def mel_spectrogram(self, y):\n assert torch.min(y.data) >= -1\n assert torch.max(y.data) <= 1\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n energy = torch.norm(magnitudes, dim=1)\n return mel_output, energy",
"def model_lightcurve(transit_times, period, t, model_transit_single):\r\n\r\n # Append one more transit after and before end of nominal time series\r\n # to fully cover beginning and end with out of transit calculations\r\n earlier_tt = transit_times[0] - period\r\n extended_transit_times = numpy.append(earlier_tt, transit_times)\r\n next_tt = transit_times[-1] + period\r\n extended_transit_times = numpy.append(extended_transit_times, next_tt)\r\n full_x_array = numpy.array([])\r\n full_y_array = numpy.array([])\r\n rounds = len(extended_transit_times)\r\n internal_samples = (\r\n int(len(t) / len(transit_times))\r\n ) * tls_constants.OVERSAMPLE_MODEL_LIGHT_CURVE\r\n\r\n # Append all periods\r\n for i in range(rounds):\r\n xmin = extended_transit_times[i] - period / 2\r\n xmax = extended_transit_times[i] + period / 2\r\n x_array = numpy.linspace(xmin, xmax, internal_samples)\r\n full_x_array = numpy.append(full_x_array, x_array)\r\n full_y_array = numpy.append(full_y_array, model_transit_single)\r\n\r\n if numpy.all(numpy.isnan(full_x_array)):\r\n return None, None\r\n else: # Determine start and end of relevant time series, and crop it\r\n start_cadence = numpy.nanargmax(full_x_array > min(t))\r\n stop_cadence = numpy.nanargmax(full_x_array > max(t))\r\n full_x_array = full_x_array[start_cadence:stop_cadence]\r\n full_y_array = full_y_array[start_cadence:stop_cadence]\r\n model_lightcurve_model = full_y_array\r\n model_lightcurve_time = full_x_array\r\n return model_lightcurve_model, model_lightcurve_time",
"def calc_lamb(self, x_surface, geom):\n\n return self.rfl",
"def read_rbn_lightcurve(lc_file,log):\n\n if path.isfile(lc_file):\n\n lines = open(lc_file, 'r').readlines()\n\n imnames = []\n hjd = []\n cal_mag = []\n cal_mag_err = []\n\n for l in lines:\n\n if l[0:1] != '#':\n\n entries = l.replace('\\n','').split()\n\n imnames.append( str(entries[0]).replace('.fits','').replace('_crop','') )\n hjd.append( float(entries[1]) )\n cal_mag.append( float(entries[8]) )\n cal_mag_err.append( float(entries[9]) )\n\n lc = Table()\n lc['images'] = imnames\n lc['hjd'] = hjd\n lc['mag'] = cal_mag\n lc['mag_err'] = cal_mag_err\n\n log.info('Read data for lightcurve '+lc_file)\n\n else:\n log.info('ERROR: Cannot access lightcurve file '+lc_file)\n\n lc = Table()\n\n return lc",
"def mel_spectrogram(self, y):\n if isinstance(y, np.ndarray):\n y = torch.from_numpy(y).float()\n y = y.unsqueeze(0)\n y = torch.autograd.Variable(y, requires_grad=False)\n\n assert (torch.min(y.data) >= -1)\n assert (torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return torch.squeeze(mel_output, 0).detach().cpu().numpy().T",
"def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / (3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = _lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. 
only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()",
"def round_L_zi(self, y):\n round_L_a = self.round_L_z(y) * self.w.T\n return self.layer_z * round_L_a",
"def laea_y(lamb, phi):\n return np.sqrt(2) * np.sin(phi) / np.sqrt(1 + np.cos(phi) * np.cos(lamb / 2))",
"def lombs(x, y):\n # Calculate curvature. \n curv = curvature(x, y)\n steps = np.sqrt(np.diff(x, axis=0)**2 + np.diff(y, axis=0)**2)[:-1]\n arc = np.cumsum(steps)\n # Calculate LS.\n ls_f, ls_p = LombScargle(arc, curv).autopower()\n return ls_f, ls_p",
"def plot_lightcurve(t, lc, lcerr, popt, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params,\n x = None, y = None, Pns = None, errors = False, binsize = 50,\n name = None, channel = None, orbit = None, savefile = False, TT_hjd = None,\n method = 'PLD', color = 'r', scale = None, filext = None, foldext = '',\n showCuts = False, ncutstarts = None, cutstartTime = None,\n cutends = False, eclipse = False, extraoutputs = False):\n # dictionary to save the reduced data, the model, the corrected data, the corrected model\n plottingdict = {}\n\n binsize = int(binsize)\n\n if name == None:\n warnings.warn( \"What planetary system are we looking at!? -- setting to 'unknown'\" )\n name = 'unknown'\n if channel == None:\n warnings.warn( \"What channel are we looking at!? -- setting to 'unknown'\" )\n channel = 'unknown'\n\n if method == 'poly':\n\n transit, F, ramp = model_poly(popt, t, x, y, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params, components = True, eclipse = eclipse)\n optflux = transit*F*ramp\n\n # Correct the lightcurve and bin the data and the optimum values\n corrected_data = lc / (F*ramp)\n binned_data = custom_bin(corrected_data, binsize)\n binned_opt = custom_bin(transit, binsize)\n binned_times = custom_bin(t, binsize)\n\n # Calculate the residuals\n residuals = lc - optflux\n rms = np.sqrt(np.sum(residuals**2)/len(residuals))\n chi2 = chi(popt, t, lc, lcerr, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params, x=x, y=y, method = 'poly', eclipse = eclipse)/(len(lc)-len(popt))\n bic = BIC(popt, t, lc, lcerr, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params, x=x, y=y, method = 'poly', eclipse = eclipse)\n\n binned_residuals = []\n for i in range(len(binned_data)):\n binned_residuals.append(binned_data[i] - binned_opt[i])\n\n # Make the plot, 3 plots in one.\n fig = plt.figure(figsize=(15, 6))\n #plt.title(\"{0}: rms={1}, red_chi2={2}, BIC={3}\".format(name, rms, chi2, bic))\n frame1=fig.add_axes((.1,.6,.8,.4))\n frame1.axes.get_xaxis().set_visible(False)\n frame2=fig.add_axes((.1,.2,.8,.4))\n frame2.axes.get_xaxis().set_visible(False)\n frame3=fig.add_axes((.1,.0,.8,.2))\n\n frame1.plot(t, lc*scale, 'ko', markersize=2, label='Raw data')\n frame1.plot(t, optflux*scale, color, label='Best fit poly model', linewidth =2)\n frame1.set_title(\"{0} - {1} - {2} lightcurve\".format(name, channel, orbit))\n frame1.set_ylabel(\"Raw [e-]\")\n frame1.legend(loc = 'best')\n\n\n frame2.plot(binned_times, binned_data, 'ko', markersize = 4, label='Binned data (x{})'.format(binsize))\n frame2.plot(binned_times, binned_opt, color = color, label='Best fit transit model')\n frame2.set_ylabel(\"Corrected & Normalised\")\n frame2.legend(loc = 'lower left')\n frame2.annotate('RMS={0:.3e}\\n'.format(rms) + r'$\\chi_{red}^2$' + '={0:.3e} \\nBIC={1:.3e}'.format(chi2, bic),\n xy=(0.85, 0.2), xycoords='axes fraction',bbox={'facecolor':color, 'alpha':0.5, 'pad':10})\n\n\n frame3.plot(binned_times,binned_residuals, 'ko', markersize = 4)\n frame3.axhline(0, color = color)\n frame3.set_ylabel(\"Residuals\")\n\n plottingdict['Times'] = t\n plottingdict['Raw Data'] = lc*scale\n plottingdict['Full Poly Model'] = optflux*scale\n\n plottingdict['Corrected Data'] = lc / (F*ramp)\n plottingdict['Transit Model'] = transit\n plottingdict['Temporal Ramp'] = ramp\n plottingdict['Poly'] = F\n\n\n if showCuts:\n for j in range(ncutstarts):\n if cutends:\n frame1.axvline(t[-1] - j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame2.axvline(t[-1] - 
j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame3.axvline(t[-1] - j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n else:\n frame1.axvline(j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame2.axvline(j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame3.axvline(j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n\n plt.xlabel(\"Time [bjd]\")\n\n if savefile:\n plt.savefig(\"{5}/PhD/SpitzerTransits/{0}{4}/{0}_{1}_{2}_Poly_lc_{3}.png\".format(name, orbit, channel, filext, foldext, os.getenv('HOME')),bbox_inches='tight')\n plt.close()\n else:\n plt.show()\n plt.close()\n\n elif method == 'PLD':\n\n DE, pixels, ramp = model_PLD(popt, t, Pns, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params, components = True, eclipse = eclipse)\n\n optflux = DE + pixels + ramp\n\n # Correct the lightcurve and bin the data and the optimum values\n corrected_data = lc - pixels - ramp\n binned_data = custom_bin(corrected_data, binsize)\n binned_opt = custom_bin(DE, binsize)\n binned_times = custom_bin(t, binsize)\n\n # Calculate the residuals\n residuals = lc - optflux\n rms = np.sqrt(np.sum(residuals**2)/len(residuals))\n chi2 = chi(popt, t, lc, lcerr, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params, Pns=Pns, method = 'PLD', eclipse = eclipse )/(len(lc)-len(popt))\n bic = BIC(popt, t, lc, lcerr, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, sys_params, Pns=Pns, method = 'PLD', eclipse = eclipse)\n\n binned_residuals = []\n for i in range(len(binned_data)):\n binned_residuals.append(binned_data[i] - binned_opt[i])\n\n # Make the plot, 3 plots in one.\n fig = plt.figure(figsize=(15, 6))\n frame1=fig.add_axes((.1,.6,.8,.4))\n frame1.axes.get_xaxis().set_visible(False)\n frame2=fig.add_axes((.1,.2,.8,.4))\n frame2.axes.get_xaxis().set_visible(False)\n frame3=fig.add_axes((.1,.0,.8,.2))\n\n frame1.plot(t, lc*scale, 'ko', markersize=2, label='Raw data')\n frame1.plot(t, optflux*scale, color, label='Best fit PLD model', linewidth =2)\n frame1.set_title(\"{0} - {1} - {2} lightcurve\".format(name, channel, orbit))\n frame1.set_ylabel(\"Raw [e-]\")\n frame1.legend(loc = 'best')\n\n frame2.plot(binned_times, binned_data, 'ko', markersize = 4, label='Binned data (x{})'.format(binsize))\n frame2.plot(binned_times, binned_opt, color = color, label='Best fit transit model')\n frame2.set_ylabel(\"Corrected & Normalised\")\n frame2.legend(loc = 'lower left')\n frame2.annotate('RMS={0:.3e}\\n'.format(rms) + r'$\\chi_{red}^2$' + '={0:.3e} \\nBIC={1:.3e}'.format(chi2, bic),\n xy=(0.85, 0.2), xycoords='axes fraction',bbox={'facecolor':color, 'alpha':0.5, 'pad':10})\n\n frame3.plot(binned_times,binned_residuals, 'ko', markersize = 4)\n frame3.axhline(0, color = color)\n frame3.set_ylabel(\"Residuals\")\n\n plt.xlabel(\"Time [bjd]\")\n\n plottingdict['Times'] = t\n plottingdict['Raw Data'] = lc*scale\n plottingdict['Full PLD Model'] = optflux*scale\n\n plottingdict['Corrected Data'] = lc - pixels - ramp\n plottingdict['Transit Model'] = DE\n plottingdict['Temporal Ramp'] = ramp\n plottingdict['PLD'] = pixels\n\n if showCuts:\n for j in range(ncutstarts):\n if cutends:\n frame1.axvline(t[-1] - j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame2.axvline(t[-1] - j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame3.axvline(t[-1] - j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n else:\n frame1.axvline(j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n 
frame2.axvline(j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n frame3.axvline(j*(float(cutstartTime)/(60.*24.)), color = 'k', ls='dashed')\n\n if savefile:\n plt.savefig(\"{5}/PhD/SpitzerTransits/{0}{4}/{0}_{1}_{2}_PLD_lc_{3}.png\".format(name, orbit, channel,filext,foldext, os.getenv('HOME')),bbox_inches='tight')\n plt.close()\n else:\n plt.show()\n plt.close()\n\n else:\n warnings.warn( \"What model do you want to plot?!\" )\n\n if extraoutputs:\n return plottingdict\n else:\n pass"
]
| [
"0.5951895",
"0.5928167",
"0.5583655",
"0.55513257",
"0.54969233",
"0.5486457",
"0.5382974",
"0.5276939",
"0.5214253",
"0.5212705",
"0.51789206",
"0.5082815",
"0.50349635",
"0.49946722",
"0.4969627",
"0.49492365",
"0.4941575",
"0.49127588",
"0.4905105",
"0.490314",
"0.48845977",
"0.48630723",
"0.48344725",
"0.4834037",
"0.4814377",
"0.48020607",
"0.47598538",
"0.47559136",
"0.4741128",
"0.47168943"
]
| 0.61594796 | 0 |
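The slice_to_lightcurve method in the row above reduces a time-lambda-y cube to a light curve by fixing the wavelength and, optionally, the y (and x) coordinate. The short sketch below only illustrates the call pattern; the Cube class name, its constructor arguments (data, wcs, meta from some instrument-specific loader), and the use of astropy.units quantities for the coordinates are assumptions for illustration, not confirmed API.

import astropy.units as u

# Assumed: `Cube` is the class defining slice_to_lightcurve above, and
# data/wcs/meta come from an instrument-specific loader (not shown here).
cube = Cube(data, wcs, meta=meta)

# Light curve at 171 Angstrom for a single y position (one time curve).
lc_single = cube.slice_to_lightcurve(171 * u.AA, y_coord=100 * u.arcsec)

# Omitting y_coord keeps every y position, so the returned LightCurve
# object may contain more than one time curve, as the docstring notes.
lc_all = cube.slice_to_lightcurve(171 * u.AA)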
For a cube containing a spectral dimension, returns a sunpy spectrum. The given coordinates specify which values to take along the non-spectral axes; if a coordinate is None, the corresponding axis is summed. | def slice_to_spectrum(self, *coords, **kwargs):
if 'WAVE' not in self.axes_wcs.wcs.ctype:
raise cu.CubeError(2, 'Spectral axis needed to create a spectrum')
axis = -1 if self.axes_wcs.wcs.ctype[0] == 'WAVE' else -2
pixels = [cu.pixelize(coord, self.axes_wcs, axis) for coord in coords]
item = range(len(pixels))
if axis == -1:
item[1:] = pixels
item[0] = slice(None, None, None)
item = [slice(None, None, None) if i is None else i for i in item]
else:
item[0] = pixels[0]
item[1] = slice(None, None, None)
item[2:] = pixels[1:]
item = [slice(None, None, None) if i is None else i for i in item]
data = self.data[item]
errors = (None if self.uncertainty is None else self.uncertainty[item])
mask = None if self.mask is None else self.mask[item]
        # Sum over every axis whose coordinate was left as None.
        for i in range(len(pixels)):
            if pixels[i] is None:
                if i == 0:
                    sumaxis = 1 if axis == -1 else 0
                else:
                    sumaxis = 1 if i == 2 else i
                data = data.sum(axis=sumaxis)
                if mask is not None:
                    mask = mask.sum(axis=sumaxis)
        kwargs.update({'uncertainty': errors, 'mask': mask})
        wavelength_axis = self.wavelength_axis()
        freq_axis, cunit = wavelength_axis.value, wavelength_axis.unit
return Spectrum(np.array(data), np.array(freq_axis), cunit, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_to_spectral_cube(self):\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Too many dimensions: Can only convert a \" +\n \"3D cube. Slice the cube before converting\")\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, 'Spectral axis needed to create a spectrum')\n axis = 0 if self.axes_wcs.wcs.ctype[-1] == 'WAVE' else 1\n coordaxes = [1, 2] if axis == 0 else [0, 2] # Non-spectral axes\n newwcs = wu.reindex_wcs(self.axes_wcs, np.arary(coordaxes))\n time_or_x_size = self.data.shape[coordaxes[1]]\n y_size = self.data.shape[coordaxes[0]]\n spectra = np.empty((time_or_x_size, y_size), dtype=Spectrum)\n for i in range(time_or_x_size):\n for j in range(y_size):\n spectra[i][j] = self.slice_to_spectrum(i, j)\n return SpectralCube(spectra, newwcs, self.meta)",
"def sum_spectra(self, wave_range=None, units=u.Angstrom):\n if wave_range is None:\n # Sum over entire wavelength axis and return an NDCube\n sum_data = np.sum(self.data, axis=2)\n new_wcs = self.wcs.dropaxis(0)\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Summed over entire wavelength axis.')\n return NDCube(sum_data, new_wcs, meta=new_meta)\n\n # Validate input wavelength range\n if isinstance(wave_range, (list, tuple)):\n use_range = [0, 0]\n range_units = ['unknown', 'unknown']\n print('Summing EISCube spectra over a select wavelength range.')\n if len(wave_range) != 2:\n print('Error: invalid number of wave_range values. Please input'\n +' a list or tuple with exactly two elements.',\n file=sys.stderr)\n return None\n else:\n print('Error: invalid wave_range type. Please input either None or'\n +' a list (or tuple) with two elements.', file=sys.stderr)\n return None\n\n for w in range(2):\n if isinstance(wave_range[w], u.Quantity):\n # Parse an astropy.units.Quantity and convert as needed\n # Note: this will overwrite any inputs to the \"units\" kwarg\n if wave_range[w].unit == u.pix:\n use_range[w] = wave_range[w].value\n range_units[w] = u.pix\n elif wave_range[w].unit.physical_type == 'length':\n use_range[w] = wave_range[w].to('Angstrom').value\n range_units[w] = u.Angstrom\n else:\n print('Error: invalid wavelength unit. Please input a pixel'\n +' or length unit.', file=sys.stderr)\n return None\n else:\n # Assume default or user inputted units (still convert if needed)\n input_units = u.Unit(units)\n if input_units == u.pix:\n use_range[w] = float(wave_range[w])\n range_units[w] = u.pix\n elif input_units.physical_type == 'length':\n u_scale = input_units.to('Angstrom')\n use_range[w] = float(wave_range[w])*u_scale\n range_units[w] = u.Angstrom\n else:\n print('Error: invalid wavelength unit. Please input a pixel'\n +' or length unit.', file=sys.stderr)\n return None\n\n # Check for consistent units\n if range_units[0] != range_units[1]:\n print('Error: mismatched units. Please input the same units for'\n +' both wave_range elements or use the \"units\" keyword',\n file=sys.stderr)\n return None\n\n # If given values of [center, half width], compute the actual range\n if use_range[1] < use_range[0]:\n temp_center = use_range[0]\n temp_half_wid = use_range[1]\n use_range[0] = temp_center - temp_half_wid\n use_range[1] = temp_center + temp_half_wid\n\n # Get indices to be summed over\n w_indices = [0, -1]\n if range_units[0] == u.pix:\n # Round pixels values to nearest whole indice\n w_indices[w] = int(round(use_range[w]))\n elif range_units[0] == u.Angstrom:\n # Find the closest pixel location on the average wavelength axis\n try:\n # Note: the corrected wavelength has units of [Angstrom]\n w_coords = np.mean(self.wavelength, axis=(0,1))\n except KeyError:\n print('Error: missing or invalid corrected wavelength array.')\n return None\n for w in range(2):\n abs_w_diff = np.abs(w_coords - use_range[w])\n w_indices[w] = np.argmin(abs_w_diff)\n\n sum_data = np.sum(self.data[:,:,w_indices[0]:w_indices[1]+1], axis=2)\n new_wcs = self.wcs.dropaxis(0)\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Summed wavelength axis over the range of '\n +str(use_range)+' '+str(range_units[0]))\n return NDCube(sum_data, new_wcs, meta=new_meta)",
"def setspectral(self, *args, **kwargs):\n return _coordsys.coordsys_setspectral(self, *args, **kwargs)",
"def calc_spectrum(self, cursors=None):\n if not cursors:\n cursors = self.cursors\n\n x1p = cursors['x1']\n x2p = cursors['x2']\n y1p = cursors['y1']\n y2p = cursors['y2']\n\n if(x1p == 0 and x2p == 0 and y1p == 0 and y2p == 0):\n x1p = 0\n x2p = len(self.data[1])\n y1p = 0\n y2p = len(self.data[:, 1])\n\n spectrum = np.zeros(len(self.wavelengths))\n\n for i in range(y1p, y2p):\n spectrum += self.data[i]\n\n spectrum = spectrum / (y2p - y1p)\n\n return spectrum",
"def calculate(cubes):\n rlut_cube = cubes.extract_cube(\n Constraint(name='toa_outgoing_longwave_flux'))\n rlutcs_cube = cubes.extract_cube(\n Constraint(name='toa_outgoing_longwave_flux_assuming_clear_sky'))\n\n lwcre_cube = rlutcs_cube - rlut_cube\n lwcre_cube.units = rlut_cube.units\n\n return lwcre_cube",
"def get_spectra(time_series, method=None):\r\n if method is None:\r\n method = {'this_method': 'welch'} # The default\r\n # If no choice of method was explicitly set, but other parameters were\r\n # passed, assume that the method is mlab:\r\n this_method = method.get('this_method', 'welch')\r\n\r\n if this_method == 'welch':\r\n NFFT = method.get('NFFT', default_nfft)\r\n Fs = method.get('Fs', 2 * np.pi)\r\n detrend = method.get('detrend', mlab.detrend_none)\r\n window = method.get('window', mlab.window_hanning)\r\n n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))\r\n\r\n # The length of the spectrum depends on how many sides are taken, which\r\n # depends on whether or not this is a complex object:\r\n if np.iscomplexobj(time_series):\r\n fxy_len = NFFT\r\n else:\r\n fxy_len = NFFT / 2.0 + 1\r\n\r\n # If there is only 1 channel in the time-series:\r\n if len(time_series.shape) == 1 or time_series.shape[0] == 1:\r\n temp, f = mlab.csd(time_series, time_series,\r\n NFFT, Fs, detrend, window, n_overlap,\r\n scale_by_freq=True)\r\n\r\n fxy = temp.squeeze() # the output of mlab.csd has a weird\r\n # shape\r\n else:\r\n fxy = np.zeros((time_series.shape[0],\r\n time_series.shape[0],\r\n fxy_len), dtype=complex) # Make sure it's complex\r\n\r\n for i in range(time_series.shape[0]):\r\n for j in range(i, time_series.shape[0]):\r\n #Notice funny indexing, in order to conform to the\r\n #conventions of the other methods:\r\n temp, f = mlab.csd(time_series[j], time_series[i],\r\n NFFT, Fs, detrend, window, n_overlap,\r\n scale_by_freq=True)\r\n\r\n fxy[i][j] = temp.squeeze() # the output of mlab.csd has a\r\n # weird shape\r\n elif this_method in ('multi_taper_csd', 'periodogram_csd'):\r\n # these methods should work with similar signatures\r\n mdict = method.copy()\r\n func = eval(mdict.pop('this_method'))\r\n freqs, fxy = func(time_series, **mdict)\r\n f = utils.circle_to_hz(freqs, mdict.get('Fs', 2 * np.pi))\r\n\r\n else:\r\n raise ValueError(\"Unknown method provided\")\r\n\r\n return f, fxy.squeeze()",
"def smooth_cube(self, width=3, **kwargs):\n # Validate input width\n num_dims = len(self.dimensions)\n wid_list = [1]*num_dims # NB: a width of 1 results in no smoothing\n if isinstance(width, (list, tuple)):\n # Note: we assume the last dim is always wavelength\n wid_list[0] = width[0]\n if num_dims > 2:\n wid_list[1] = width[1]\n print('Warning: smoothing over the x-axis can yield unexpected'\n +' results due to the time interval between observations.'\n +' Use with care.')\n\n if len(width) >= num_dims:\n print('Warning: smoothing over the wavelength axis is not'\n +' supported. Only widths for the Y & X axes will be used')\n elif isinstance(width, (int, float, u.Quantity)):\n wid_list[0] = width # Only smooth along y-axis\n else:\n print('Error: invalid width data type. Please input an int, float,'\n +' or astropy.units.Quantity instance', file=sys.stderr)\n return None\n\n coord_ax = ['y', 'x', 'w']\n for w in range(len(wid_list)-1):\n # Parse a astropy.units.Quantity and convert to units of pixels\n if isinstance(wid_list[w], u.Quantity):\n if wid_list[w].unit == u.pix:\n wid_list[w] = wid_list[w].value\n elif not wid_list[w].unit.physical_type == 'angle':\n print('Error: invalid width unit. Please input a pixel or'\n +' angular unit.', file=sys.stderr)\n return None\n else:\n try:\n # Note: y & x scales are in units of [arcsec]/[pixel]\n ax_scale = self.meta['pointing'][coord_ax[w]+'_scale']\n except KeyError:\n print('Error: missing '+coord_ax[w]+'-axis scale.')\n return None\n angular_wid_str = str(wid_list[w])\n wid_list[w] = wid_list[w].to('arcsec').value / ax_scale\n print('Note: on the '+coord_ax[w]+'-axis, '+angular_wid_str\n +' is equivalent to '+str(wid_list[w])+' pixels.')\n\n # Round to nearest pixel and add 1 to even values\n wid_list[w] = int(round(wid_list[w]))\n if wid_list[w] % 2 == 0:\n wid_list[w] = wid_list[w] + 1\n\n # Create smoothing kernel with normalized weights (i.e. sum to 1)\n # Note: Using a 2D or 3D kernel allows us to smooth everything at once\n sm_weights = np.ones(wid_list) / (wid_list[0]*wid_list[1])\n sm_kernel = CustomKernel(sm_weights)\n\n # Calculate smoothed data and uncertainty values\n sm_data = convolve(self.data, sm_kernel, **kwargs)\n if self.uncertainty is not None:\n sm_errs = np.sqrt(convolve(self.uncertainty.array**2,\n sm_kernel, **kwargs))\n else:\n sm_errs = none\n sm_data_mask = np.logical_or(np.isnan(sm_data), sm_data < 0)\n\n # Pack everything up in a new EISCube\n old_radcal = self.radcal\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Smoothed using pixel widths of '+str(wid_list))\n wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()\n\n output_cube = EISCube(sm_data, wcs=self.wcs, uncertainty=sm_errs,\n wavelength=self.wavelength, radcal=old_radcal,\n meta=new_meta, unit=self.unit,\n mask=sm_data_mask, missing_axes=wcs_mask)\n\n return output_cube",
"def CalcSpectra(x,y, input=None, output=None):\n N = max(shape(x))\n x_fft = squeeze(fft(x, None, 0)*2/N)\n y_fft = squeeze(fft(y, None, 0)*2/N)\n Gxx = norm2(x_fft)\n Gyy = norm2(y_fft)\n Gxy = (scipy.conj(x_fft))*y_fft\n return Spectra(input, output, Gxx, Gyy, Gxy)",
"def spectral_data(spectra):\n weights = np.concatenate([ s.ivar for s in spectra ])\n flux = np.concatenate([ s.flux for s in spectra ])\n wflux = weights * flux\n return (weights, flux, wflux)",
"def extract_spectra(self, cube, obj_mask='MASK_UNION', sky_mask='MASK_SKY',\n tags_to_try=('MUSE_WHITE', 'NB_LYALPHA',\n 'NB_HALPHA', 'NB_SUMOII3726'),\n skysub=True, psf=None, beta=None, lbda=None,\n apertures=None, unit_wave=u.angstrom):\n if obj_mask not in self.images:\n raise ValueError('key %s not present in the images dictionary'\n % obj_mask)\n\n if skysub and sky_mask not in self.images:\n raise ValueError('key %s not present in the images dictionary'\n % sky_mask)\n\n ima = self.images[obj_mask]\n\n if ima.wcs.sameStep(cube.wcs):\n size = ima.shape[0]\n unit_size = None\n else:\n size = ima.wcs.get_step(unit=u.arcsec)[0] * ima.shape[0]\n unit_size = u.arcsec\n\n center = (self.dec, self.ra)\n subcub = cube.subcube(center=center, size=size,\n unit_center=u.deg, unit_size=unit_size,\n lbda=lbda, unit_wave=unit_wave)\n wcsref = subcub.wcs\n\n if not ima.wcs.isEqual(wcsref):\n ima = ima.resample(\n newdim=subcub.shape[1:],\n newstart=wcsref.get_start(unit=u.deg),\n newstep=wcsref.get_step(unit=u.arcsec),\n order=0, unit_start=u.deg, unit_step=u.arcsec)\n\n object_mask = ima.data.data\n\n if skysub:\n skymask = self.images[sky_mask]\n if not skymask.wcs.isEqual(wcsref):\n skymask = skymask.resample(\n newdim=subcub.shape[1:],\n newstart=wcsref.get_start(unit=u.deg),\n newstep=wcsref.get_step(unit=u.arcsec),\n order=0, unit_start=u.deg, unit_step=u.arcsec)\n\n # Get the sky spectrum and subtract it\n self.spectra['MUSE_SKY'] = subcub.mean(weights=skymask.data.data,\n axis=(1, 2))\n subcub = subcub - self.spectra['MUSE_SKY']\n suffix = '_SKYSUB'\n else:\n suffix = ''\n\n # No weighting\n spec = (subcub * object_mask).sum(axis=(1, 2))\n self.spectra['MUSE_TOT' + suffix] = spec\n\n if apertures:\n tmpim = Image(data=np.zeros_like(object_mask, dtype=bool),\n copy=False, wcs=ima.wcs)\n for radius in apertures:\n tmpim.mask_ellipse(center, radius, 0)\n mask = object_mask.astype(bool) & tmpim.mask\n # spec = compute_spectrum(subcub, weights=mask)\n spec = (subcub * mask).sum(axis=(1, 2))\n self.spectra['MUSE_APER_%.1f%s' % (radius, suffix)] = spec\n tmpim.unmask()\n\n # Loop over the narrow-band images we want to use. Apply the object\n # mask and ensure that the weight map within the object mask is >=0.\n if tags_to_try is not None:\n nb_tags = list(set(tags_to_try) & set(self.images))\n ksel = (object_mask != 0)\n for tag in nb_tags:\n if self.images[tag].wcs.isEqual(wcsref):\n weight = self.images[tag].data.copy()\n weight[ksel] -= np.min(weight[ksel])\n weight = weight.filled(0)\n self.spectra[tag + suffix] = compute_optimal_spectrum(\n subcub, object_mask, weight)\n\n # PSF\n if psf is not None:\n if len(psf.shape) == 3:\n # PSF cube. The user is responsible for getting the\n # dimensions right\n if not np.array_equal(psf.shape, subcub.shape):\n raise ValueError('Incorrect dimensions for the PSF cube '\n '({}) (it must be ({})) '\n .format(psf.shape, subcub.shape))\n elif len(psf.shape) == 1:\n psf = create_psf_cube(subcub.shape, psf, beta=beta, wcs=wcsref)\n\n spec = compute_optimal_spectrum(subcub, object_mask, psf)\n self.spectra['MUSE_PSF' + suffix] = spec\n # Insert the PSF weighted flux - here re-normalised?",
"def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):\n self.params.update(**params)\n # Pass the model parameters through to the sps object\n ncomp = len(self.params['mass'])\n for ic in range(ncomp):\n s, p, x = self.one_sed(component_index=ic, filterlist=filters)\n try:\n spec += s\n maggies += p\n extra += [x]\n except(NameError):\n spec, maggies, extra = s, p, [x]\n # `spec` is now in Lsun/Hz, with the wavelength array being the\n # observed frame wavelengths. Flux array (and maggies) have not been\n # increased by (1+z) due to cosmological redshift\n\n if outwave is not None:\n w = self.csp.wavelengths\n spec = np.interp(outwave, w, spec)\n # Distance dimming and unit conversion\n if (self.params['zred'] == 0) or ('lumdist' in self.params):\n # Use 10pc for the luminosity distance (or a number provided in the\n # lumdist key in units of Mpc). Do not apply cosmological (1+z)\n # factor to the flux.\n dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2\n a = 1.0\n else:\n # Use the comsological luminosity distance implied by this\n # redshift. Incorporate cosmological (1+z) factor on the flux.\n lumdist = cosmo.luminosity_distance(self.params['zred']).value\n dfactor = (lumdist * 1e5)**2\n a = (1 + self.params['zred'])\n if peraa:\n # spectrum will be in erg/s/cm^2/AA\n spec *= to_cgs * a / dfactor * lightspeed / outwave**2\n else:\n # Spectrum will be in maggies\n spec *= to_cgs * a / dfactor / 1e3 / (3631*jansky_mks)\n\n # Convert from absolute maggies to apparent maggies\n maggies *= a / dfactor\n \n return spec, maggies, extra",
"def collapse_to_spectrum(self, add_data=True, **kwargs):\n # get glue Data objects for the spectral cube and uncertainties\n flux_viewer = self._app.get_viewer(\n self._app._jdaviz_helper._default_flux_viewer_reference_name\n )\n uncert_viewer = self._app.get_viewer(\n self._app._jdaviz_helper._default_uncert_viewer_reference_name\n )\n [spectral_cube] = flux_viewer.data()\n [uncert_cube] = uncert_viewer.data()\n\n # This plugin collapses over the *spatial axes* (optionally over a spatial subset,\n # defaults to ``No Subset``). Since the Cubeviz parser puts the fluxes\n # and uncertainties in different glue Data objects, we translate the spectral\n # cube and its uncertainties into separate NDDataArrays, then combine them:\n if self.spatial_subset_selected != self.spatial_subset.default_text:\n nddata = spectral_cube.get_subset_object(\n subset_id=self.spatial_subset_selected, cls=NDDataArray\n )\n uncertainties = uncert_cube.get_subset_object(\n subset_id=self.spatial_subset_selected, cls=StdDevUncertainty\n )\n else:\n nddata = spectral_cube.get_object(cls=NDDataArray)\n uncertainties = uncert_cube.get_object(cls=StdDevUncertainty)\n\n # Use the spectral coordinate from the WCS:\n if '_orig_spec' in spectral_cube.meta:\n wcs = spectral_cube.meta['_orig_spec'].wcs.spectral\n else:\n wcs = spectral_cube.coords.spectral\n\n flux = nddata.data << nddata.unit\n mask = nddata.mask\n\n nddata_reshaped = NDDataArray(\n flux, mask=mask, uncertainty=uncertainties, wcs=wcs, meta=nddata.meta\n )\n\n # by default we want to use operation_ignores_mask=True in nddata:\n kwargs.setdefault(\"operation_ignores_mask\", True)\n # by default we want to propagate uncertainties:\n kwargs.setdefault(\"propagate_uncertainties\", True)\n\n # Collapse an e.g. 3D spectral cube to 1D spectrum, assuming that last axis\n # is always wavelength. This may need adjustment after the following\n # specutils PR is merged: https://github.com/astropy/specutils/pull/1033\n spatial_axes = (0, 1)\n\n collapsed_nddata = getattr(nddata_reshaped, self.function_selected.lower())(\n axis=spatial_axes, **kwargs\n ) # returns an NDDataArray\n\n # Convert to Spectrum1D, with the spectral axis in correct units:\n if hasattr(spectral_cube.coords, 'spectral_wcs'):\n target_wave_unit = spectral_cube.coords.spectral_wcs.world_axis_units[0]\n else:\n target_wave_unit = spectral_cube.coords.spectral.world_axis_units[0]\n\n flux = collapsed_nddata.data << collapsed_nddata.unit\n mask = collapsed_nddata.mask\n uncertainty = collapsed_nddata.uncertainty\n\n collapsed_spec = _return_spectrum_with_correct_units(\n flux, wcs, collapsed_nddata.meta, 'flux',\n target_wave_unit=target_wave_unit,\n uncertainty=uncertainty,\n mask=mask\n )\n\n if add_data:\n self.add_results.add_results_from_plugin(\n collapsed_spec, label=self.results_label, replace=False\n )\n\n snackbar_message = SnackbarMessage(\n \"Spectrum extracted successfully.\",\n color=\"success\",\n sender=self)\n self.hub.broadcast(snackbar_message)\n\n return collapsed_spec",
"def spectral_axis(self):\n\n if self._wcs is None:\n spec_axis = np.arange(self.size) * u.one\n else:\n spec_axis = self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] * \\\n u.Unit(self.wcs.wcs.cunit[0])\n if self._spectral_unit is not None:\n spec_axis = spec_axis.to(self._spectral_unit)\n\n return spec_axis",
"def _get_spectrum(parlist, catdir):\n name = parlist[3]\n\n filename = name.split('[')[0]\n column = name.split('[')[1][:-1]\n\n filename = os.path.join(catdir, filename)\n sp = SourceSpectrum.from_file(filename, flux_col=column)\n\n totflux = sp.integrate()\n try:\n validate_totalflux(totflux)\n except synexceptions.SynphotError:\n raise exceptions.ParameterOutOfBounds(\n \"Parameter '{0}' has no valid data.\".format(parlist))\n\n result = [member for member in parlist]\n result.pop()\n result.append(sp)\n\n return result",
"def get_psf_spectrum(sky_coord, observations):\n\n if not hasattr(observations, \"__iter__\"):\n observations = (observations,)\n\n spectra = []\n for obs in observations:\n\n pixel = obs.frame.get_pixel(sky_coord)\n index = np.round(pixel).astype(np.int)\n\n psf = obs.frame.psf.get_model()\n bbox = obs.frame.psf.bbox + (0, *index)\n img = bbox.extract_from(obs.images)\n\n # img now 0 outside of observation, psf is not:\n # restrict both to observed pixels to avoid truncation effects\n mask = img[0] > 0\n psf = psf[:, mask] # flattens array in last two axes\n img = img[:, mask]\n\n # amplitude of img when projected onto psf\n # i.e. factor to multiply psf with to get img (if img looked like psf)\n spectrum = (img * psf).sum(axis=1) / (psf * psf).sum(axis=1)\n spectra.append(spectrum)\n\n spectrum = np.concatenate(spectra).reshape(-1)\n\n if np.any(spectrum <= 0):\n # If the flux in all channels is <=0,\n # the new sed will be filled with NaN values,\n # which will cause the code to crash later\n msg = \"Zero or negative spectrum {} at y={}, x={}\".format(spectrum, *sky_coord)\n if np.all(spectrum <= 0):\n logger.warning(msg)\n else:\n logger.info(msg)\n\n return spectrum",
"def spectrum_to_xyz(spectrum: Callable) -> ndarray:\n xyz = spectrum(WAVELENGTHS_380_780) @ CIE_XYZ_380_780\n xyz /= sum(xyz)\n return xyz",
"def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux",
"def _move_spectral_axis(wcs, flux, mask=None, uncertainty=None):\n naxis = getattr(wcs, 'naxis', len(wcs.world_axis_physical_types))\n if naxis > 1:\n temp_axes = []\n phys_axes = wcs.world_axis_physical_types\n for i in range(len(phys_axes)):\n if phys_axes[i] is None:\n continue\n if phys_axes[i][0:2] == \"em\" or phys_axes[i][0:5] == \"spect\":\n temp_axes.append(i)\n if len(temp_axes) != 1:\n raise ValueError(\"Input WCS must have exactly one axis with \"\n \"spectral units, found {}\".format(len(temp_axes)))\n\n # Due to FITS conventions, a WCS with spectral axis first corresponds\n # to a flux array with spectral axis last.\n if temp_axes[0] != 0:\n wcs = wcs.swapaxes(0, temp_axes[0])\n if flux is not None:\n flux = np.swapaxes(flux, len(flux.shape) - temp_axes[0] - 1, -1)\n if mask is not None:\n mask = np.swapaxes(mask, len(mask.shape) - temp_axes[0] - 1, -1)\n if uncertainty is not None:\n if isinstance(uncertainty, NDUncertainty):\n # Account for Astropy uncertainty types\n unc_len = len(uncertainty.array.shape)\n temp_unc = np.swapaxes(uncertainty.array,\n unc_len - temp_axes[0] - 1, -1)\n if uncertainty.unit is not None:\n temp_unc = temp_unc * u.Unit(uncertainty.unit)\n uncertainty = type(uncertainty)(temp_unc)\n else:\n uncertainty = np.swapaxes(uncertainty,\n len(uncertainty.shape) -\n temp_axes[0] - 1, -1)\n return wcs, flux, mask, uncertainty",
"def test_sum_cube(self):\n self.init()\n assert sum_cube(self.i64_3) == np.sum(self.i64_3)\n assert sum_cube(self.fi64_3) == np.sum(self.fi64_3)\n assert sum_cube(self.f64_3) == np.sum(self.f64_3)\n assert sum_cube(self.ff64_3) == np.sum(self.ff64_3)\n assert type(sum_cube(self.i64_3)) == int\n assert type(sum_cube(self.fi64_3)) == int\n assert type(sum_cube(self.f64_3)) == float\n assert type(sum_cube(self.ff64_3)) == float",
"def calc_noise_in_cube(cube, masking_scheme='simple', mask=None,\n spatial_average_npix=None,\n spatial_average_nbeam=5.0,\n spectral_average_nchan=5, verbose=False):\n\n from scipy.ndimage import generic_filter\n from astropy.stats import mad_std\n\n if masking_scheme not in ['simple', 'user']:\n raise ValueError(\"'masking_scheme' should be specified as\"\n \"either 'simple' or 'user'\")\n elif masking_scheme == 'user' and mask is None:\n raise ValueError(\"'masking_scheme' set to 'user', yet \"\n \"no user-specified mask found\")\n\n # extract negative values (only needed if masking_scheme='simple')\n if masking_scheme == 'simple':\n if verbose:\n print(\"Extracting negative values...\")\n negmask = cube < (0 * cube.unit)\n negdata = cube.with_mask(negmask).filled_data[:].value\n negdata = np.stack([negdata, -1 * negdata], axis=-1)\n else:\n negdata = None\n\n # find rms noise as a function of channel\n if verbose:\n print(\"Estimating rms noise as a function of channel...\")\n if masking_scheme == 'user':\n mask_v = mask\n elif masking_scheme == 'simple':\n rms_v = mad_std(negdata, axis=(1, 2, 3), ignore_nan=True)\n uplim_v = (3 * rms_v * cube.unit).reshape(-1, 1, 1)\n lolim_v = (-3 * rms_v * cube.unit).reshape(-1, 1, 1)\n mask_v = (((cube - uplim_v) < (0 * cube.unit)) &\n ((cube - lolim_v) > (0 * cube.unit)))\n rms_v = cube.with_mask(mask_v).mad_std(axis=(1, 2)).quantity.value\n rms_v = generic_filter(rms_v, np.nanmedian,\n mode='constant', cval=np.nan,\n size=spectral_average_nchan)\n \n # find rms noise as a function of sightline\n if verbose:\n print(\"Estimating rms noise as a function of sightline...\")\n if masking_scheme == 'user':\n mask_s = mask\n elif masking_scheme == 'simple':\n rms_s = mad_std(negdata, axis=(0, 3), ignore_nan=True)\n uplim_s = 3 * rms_s * cube.unit\n lolim_s = -3 * rms_s * cube.unit\n mask_s = (((cube - uplim_s) < (0 * cube.unit)) &\n ((cube - lolim_s) > (0 * cube.unit)))\n rms_s = cube.with_mask(mask_s).mad_std(axis=0).quantity.value\n if spatial_average_npix is None:\n beamFWHM_pix = (cube.beam.major.to(u.deg).value /\n np.abs(cube.wcs.celestial.wcs.cdelt.min()))\n beamFWHM_pix = np.max([beamFWHM_pix, 3.])\n spatial_average_npix = int(spatial_average_nbeam *\n beamFWHM_pix)\n rms_s = generic_filter(rms_s, np.nanmedian,\n mode='constant', cval=np.nan,\n size=spatial_average_npix)\n\n # create rms noise cube from the tensor product of rms_v and rms_s\n if verbose:\n print(\"Creating rms noise cube (direct tensor product)...\")\n rmscube = SpectralCube(np.einsum('i,jk', rms_v, rms_s),\n wcs=cube.wcs,\n header=cube.header.copy(strip=True))\n rmscube.allow_huge_operations = cube.allow_huge_operations\n # correct the normalization of the rms cube\n if masking_scheme == 'user':\n mask_n = mask\n elif masking_scheme == 'simple':\n rms_n = mad_std(negdata, ignore_nan=True)\n uplim_n = 3 * rms_n * cube.unit\n lolim_n = -3 * rms_n * cube.unit\n mask_n = (((cube - uplim_n) < (0 * cube.unit)) &\n ((cube - lolim_n) > (0 * cube.unit)))\n rms_n = cube.with_mask(mask_n).mad_std().value\n rmscube /= rms_n\n\n # apply NaN mask\n rmscube = rmscube.with_mask(cube.mask.include())\n\n # check unit\n if rmscube.unit != cube.unit:\n rmscube = rmscube * (cube.unit / rmscube.unit)\n\n return rmscube",
"def slice_to_spectrogram(self, y_coord, x_coord=None, **kwargs):\n if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:\n raise cu.CubeError(1,\n 'Cannot create a spectrogram with no time axis')\n if self.axes_wcs.wcs.ctype[1] != 'WAVE':\n raise cu.CubeError(2, 'A spectral axis is needed in a spectrogram')\n if self.data.ndim == 3:\n data = self.data[:, :, cu.pixelize(y_coord, self.axes_wcs, 2)]\n else:\n if x_coord is None:\n raise cu.CubeError(4, 'An x-coordinate is needed for 4D cubes')\n data = self.data[:, :, cu.pixelize(y_coord, self.axes_wcs, 2),\n cu.pixelize(x_coord, self.axes_wcs, 3)]\n time_axis = self.time_axis().value\n freq_axis = self.wavelength_axis().value\n\n if 'DATE_OBS'in self.meta:\n tformat = '%Y-%m-%dT%H:%M:%S.%f'\n start = datetime.datetime.strptime(self.meta['DATE_OBS'], tformat)\n else:\n start = datetime.datetime(1, 1, 1)\n\n if 'DATE_END' in self.meta:\n tformat = '%Y-%m-%dT%H:%M:%S.%f'\n end = datetime.datetime.strptime(self.meta['DATE_END'], tformat)\n else:\n dif = time_axis[-1] - time_axis[0]\n unit = self.axes_wcs.wcs.cunit[0]\n dif = dif * u.Unit(unit)\n days = dif.to(sday)\n lapse = datetime.timedelta(days.value)\n end = start + lapse\n return Spectrogram(data=data, time_axis=time_axis, freq_axis=freq_axis,\n start=start, end=end, **kwargs)",
"def window_sumsquare(window, n_frames, hop_length, win_length, n_fft, dtype=np.float32, norm=None):\n if win_length is None:\n win_length = n_fft\n n = n_fft + hop_length * (n_frames - 1)\n x = np.zeros(n, dtype=dtype)\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2\n win_sq = librosa_util.pad_center(win_sq, n_fft)\n for i in range(n_frames):\n sample = i * hop_length\n x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]\n return x",
"def spectral():\n c = _si.c.value\n h = _si.h.value\n hc = h * c\n two_pi = 2.0 * np.pi\n inv_m_spec = si.m**-1\n inv_m_ang = si.radian / si.m\n\n return Equivalency(\n [\n (si.m, si.Hz, lambda x: c / x),\n (si.m, si.J, lambda x: hc / x),\n (si.Hz, si.J, lambda x: h * x, lambda x: x / h),\n (si.m, inv_m_spec, lambda x: 1.0 / x),\n (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),\n (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),\n (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),\n (si.m, inv_m_ang, lambda x: two_pi / x),\n (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),\n (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),\n ],\n \"spectral\",\n )",
"def getAllSpectrumMeasurements(self): \n return self.spectrum",
"def get_spectrum(self, outwave=None, filters=None, nebular=True, **params):\n spec, neb, phot, ex = self.get_components(outwave, filters, **params)\n total_spec = (spec * self.params['mass'][:, None]).sum(axis=0)\n if nebular:\n total_spec += neb\n total_phot = (phot * self.params['mass'][:, None]).sum(axis=0)\n extra = (ex * self.params['mass']).sum()\n\n return total_spec, total_phot, extra",
"def get_spectrum(self, point, record):\n if not self.is_a_spectrum_file():\n raise TelemacException(\\\n \"This file does not seem to be a spectrum file\")\n\n spectrum_var = self.get_spectrum_varname(point)\n\n # Getting list of frequencies\n freqs, _ = self.get_spectrum_freq()\n\n nfreq = len(freqs)\n ntheta = self.npoin2//nfreq\n\n # Reshaping to match nfreq*ntheta\n data = self.get_data_value(spectrum_var, record)\\\n .reshape((nfreq, ntheta))\n\n # Integration over angles\n spectrum = np.sum(data, axis=1) * 2*np.pi/ntheta\n\n return freqs, spectrum",
"def spectrogram(\n inputs: jnp.array,\n pad: int,\n window: jnp.array,\n n_fft: int,\n hop_length: int,\n win_length: int,\n power: Optional[float] = 2.0,\n normalized: bool = False,\n center: bool = True,\n onesided: bool = True,\n return_complex: bool = True\n) -> jnp.array:\n if power is None and not return_complex:\n raise ValueError(f\"in correct combination of power(={power}) and return_complex(={return_complex}) provided.\")\n\n if pad > 0:\n inputs = batch_pad(inputs, pad, \"zeros\")\n\n spec_f = stft(\n inputs,\n n_fft,\n hop_length,\n win_length,\n window,\n center=center,\n onesided=onesided\n )\n\n if normalized:\n spec_f /= jnp.sqrt(jnp.sum(jnp.power(window, 2.)))\n\n if power is not None:\n if power == 1.0:\n return jnp.abs(spec_f)\n return jnp.power(jnp.abs(spec_f), power)\n if not return_complex:\n return jnp.abs(spec_f)\n return spec_f",
"def test_fft_spectrum_02():\n f, t, Sxx = _spectral_helper(x, x, fs=s_freq,\n window='hann',\n nperseg=x.shape[0],\n noverlap=0,\n nfft=None,\n return_onesided=True,\n mode='psd',\n scaling='spectrum')\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', scaling='energy', sides='one')\n\n assert_array_equal(f0, f)\n assert_array_almost_equal(Sxx0, Sxx[:, 0] * CORRECTION_FACTOR)",
"def window_sumsquare(window, n_frames, hop_length=200, win_length=800,\n n_fft=800, dtype=np.float32, norm=None):\n if win_length is None:\n win_length = n_fft\n\n n = n_fft + hop_length * (n_frames - 1)\n x = np.zeros(n, dtype=dtype)\n\n # Compute the squared window at the desired length\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa.util.normalize(win_sq, norm=norm)**2\n win_sq = librosa.util.pad_center(win_sq, n_fft)\n\n # Fill the envelope\n for i in range(n_frames):\n sample = i * hop_length\n x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]\n return x",
"def get_pixel_spectrum(sky_coord, observations, correct_psf=False):\n\n if not hasattr(observations, \"__iter__\"):\n observations = (observations,)\n\n spectra = []\n for obs in observations:\n pixel = obs.frame.get_pixel(sky_coord)\n index = np.round(pixel).astype(np.int)\n spectrum = obs.images[:, index[0], index[1]].copy()\n\n if obs.frame.psf is not None and correct_psf:\n # image of point source in observed = obs.frame.psf\n psf_model = obs.frame.psf.get_model()\n psf_center = psf_model.max(axis=(1, 2))\n # best fit solution for the model amplitude of the center pixel\n # to yield to PSF center: (spectrum * psf_center) / psf_center**2\n # or shorter:\n spectrum /= psf_center\n\n spectra.append(spectrum)\n\n spectrum = np.concatenate(spectra).reshape(-1)\n\n if np.any(spectrum <= 0):\n # If the flux in all channels is <=0,\n # the new sed will be filled with NaN values,\n # which will cause the code to crash later\n msg = \"Zero or negative spectrum {} at y={}, x={}\".format(spectrum, *sky_coord)\n if np.all(spectrum <= 0):\n logger.warning(msg)\n else:\n logger.info(msg)\n\n return spectrum"
]
| [
"0.6098314",
"0.57695895",
"0.56967276",
"0.5517648",
"0.5394993",
"0.5346096",
"0.5340599",
"0.52352476",
"0.5234346",
"0.52302986",
"0.52094436",
"0.52033246",
"0.5161801",
"0.5134656",
"0.50857544",
"0.5084157",
"0.50615275",
"0.5031639",
"0.49863473",
"0.49710125",
"0.49620813",
"0.4951018",
"0.48951596",
"0.48828813",
"0.48451766",
"0.48389643",
"0.48325908",
"0.48290357",
"0.48249292",
"0.48248452"
]
| 0.62595874 | 0 |
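
A minimal, self-contained numpy sketch of the index-then-sum pattern used by slice_to_spectrum above. The surrounding cube class, cu.pixelize and the Spectrum constructor are not reproduced; the array shape and pixel values are invented purely for illustration, so this is a sketch of the technique rather than the method itself.

import numpy as np

# Stand-in for cube.data with the spectral axis first: (wavelength, y, x).
data = np.arange(5 * 4 * 3, dtype=float).reshape(5, 4, 3)
pixels = [2, None]   # y pixel is given; x is None and will be summed over

# Keep the spectral axis whole; fix the other axes to the requested pixels,
# turning None coordinates into full slices.
item = [slice(None)] + [slice(None) if p is None else p for p in pixels]
spec = data[tuple(item)]            # shape (5, 3): wavelength kept, y fixed

# Collapse every axis whose coordinate was None (here only the x axis).
for p in pixels:
    if p is None:
        spec = spec.sum(axis=1)     # axis 0 is wavelength, so sum over axis 1

print(spec.shape)                   # -> (5,): one value per wavelength

The real method applies the same pattern to the cube's own data, mask and uncertainty, then wraps the result in a Spectrum together with the wavelength axis.
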
For a time-lambda-y cube, given a y-coordinate, returns a sunpy spectrogram. Keyword arguments are passed on to Spectrogram's __init__. | def slice_to_spectrogram(self, y_coord, x_coord=None, **kwargs):
if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:
raise cu.CubeError(1,
'Cannot create a spectrogram with no time axis')
if self.axes_wcs.wcs.ctype[1] != 'WAVE':
raise cu.CubeError(2, 'A spectral axis is needed in a spectrogram')
if self.data.ndim == 3:
data = self.data[:, :, cu.pixelize(y_coord, self.axes_wcs, 2)]
else:
if x_coord is None:
raise cu.CubeError(4, 'An x-coordinate is needed for 4D cubes')
data = self.data[:, :, cu.pixelize(y_coord, self.axes_wcs, 2),
cu.pixelize(x_coord, self.axes_wcs, 3)]
time_axis = self.time_axis().value
freq_axis = self.wavelength_axis().value
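        # Work out the observation start and end times for the Spectrogram:
        # prefer the FITS-style DATE_OBS / DATE_END keywords in self.meta and,
        # when they are absent, fall back to a placeholder start epoch and an
        # end time inferred from the extent of the time axis.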
        if 'DATE_OBS' in self.meta:
tformat = '%Y-%m-%dT%H:%M:%S.%f'
start = datetime.datetime.strptime(self.meta['DATE_OBS'], tformat)
else:
start = datetime.datetime(1, 1, 1)
if 'DATE_END' in self.meta:
tformat = '%Y-%m-%dT%H:%M:%S.%f'
end = datetime.datetime.strptime(self.meta['DATE_END'], tformat)
else:
dif = time_axis[-1] - time_axis[0]
unit = self.axes_wcs.wcs.cunit[0]
dif = dif * u.Unit(unit)
days = dif.to(sday)
lapse = datetime.timedelta(days.value)
end = start + lapse
return Spectrogram(data=data, time_axis=time_axis, freq_axis=freq_axis,
start=start, end=end, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mel_spectrogram(self, y):\n # assert(torch.min(y.data) >= -1)\n # assert(torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n # magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return mel_output",
"def mel_spectrogram(self, y):\n if isinstance(y, np.ndarray):\n y = torch.from_numpy(y).float()\n y = y.unsqueeze(0)\n y = torch.autograd.Variable(y, requires_grad=False)\n\n assert (torch.min(y.data) >= -1)\n assert (torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return torch.squeeze(mel_output, 0).detach().cpu().numpy().T",
"def mel_spectrogram(self, y):\n assert torch.min(y.data) >= -1\n assert torch.max(y.data) <= 1\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n energy = torch.norm(magnitudes, dim=1)\n return mel_output, energy",
"def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}",
"def inv_spectrogram(self, y):\n if self.use_mel:\n return self.mel_inv(y)\n if self.use_complex:\n return y\n return self.loglin_inv(y)",
"def makeSpectrogram(self, datalist):\n data = SpectrogramData(datalist)\n spectrogram = Qwt.QwtPlotSpectrogram()\n spectrogram.setData(data)\n spectrogram.attach(self)\n self.clearZoomStack()",
"def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num, max_duration=None):\n Sxx = spectrogram_converter.mel_spectrogram(wav_path, max_duration)\n for i in range(Sxx.shape[0]):\n for j in range(Sxx.shape[1]):\n X[index, 0, i, j] = Sxx[i, j]\n y[index] = curr_speaker_num\n return 1",
"def create_spectrogram(sound_file, X, y):\n spec_file = sound_file.split(\"/\")[-1].split(\".\")[0] + \"_X\" + X + \"_y\" + y + \".png\"\n \n sound = AudioSegment.from_wav(sound_file)\n if len(sound)*(int(X)/1000) <= PIXEL_LIMIT: # max pixel limit\n command = \"sox \"+ sound_file + \" -n spectrogram -l -r -m -y \" + y + \" -X \" + X + \" -o \" + spec_file\n subprocess.call(command.split())\n return spec_file\n\n # If large file segment into chunks of smaller spectrogram and concatenate later\n chunk_size = int((PIXEL_LIMIT/int(X))*1000)\n start, stop = 0, chunk_size\n\n # Temporary folders to store temporary data in\n os.makedirs(\"sound_chunks\")\n os.makedirs(\"spec_chunks\")\n \n chunk_files = []\n chopping = True\n while chopping:\n if stop > len(sound):\n stop = len(sound)\n chopping = False\n\n # Create sound chop\n chunk_file = \"chunk\" + str(start) + \"-\" + str(stop)\n sound_chunk = sound[start:stop]\n sound_chunk.export(\"sound_chunks/\" + chunk_file + \".wav\", format=\"wav\")\n \n # Create spectrogram chop\n command = \"sox sound_chunks/\" + chunk_file + \".wav -n spectrogram -l -r -m -y \" + y + \" -X \" + X + \" -o spec_chunks/\" + chunk_file + \".png\"\n subprocess.call(command.split())\n\n # Remember filenames for concatenation later\n chunk_files.append(\"spec_chunks/\" + chunk_file + \".png\")\n start += chunk_size\n stop += chunk_size\n\n # Concatenate spectrograms into a big one\n cat_command = \"convert \"\n for chunk_file in chunk_files:\n cat_command+= chunk_file + \" \"\n cat_command += \"+append \" + spec_file\n subprocess.call(cat_command.split())\n shutil.rmtree(\"sound_chunks\")\n shutil.rmtree(\"spec_chunks\")\n return spec_file",
"def get_spectra(time_series, method=None):\r\n if method is None:\r\n method = {'this_method': 'welch'} # The default\r\n # If no choice of method was explicitly set, but other parameters were\r\n # passed, assume that the method is mlab:\r\n this_method = method.get('this_method', 'welch')\r\n\r\n if this_method == 'welch':\r\n NFFT = method.get('NFFT', default_nfft)\r\n Fs = method.get('Fs', 2 * np.pi)\r\n detrend = method.get('detrend', mlab.detrend_none)\r\n window = method.get('window', mlab.window_hanning)\r\n n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))\r\n\r\n # The length of the spectrum depends on how many sides are taken, which\r\n # depends on whether or not this is a complex object:\r\n if np.iscomplexobj(time_series):\r\n fxy_len = NFFT\r\n else:\r\n fxy_len = NFFT / 2.0 + 1\r\n\r\n # If there is only 1 channel in the time-series:\r\n if len(time_series.shape) == 1 or time_series.shape[0] == 1:\r\n temp, f = mlab.csd(time_series, time_series,\r\n NFFT, Fs, detrend, window, n_overlap,\r\n scale_by_freq=True)\r\n\r\n fxy = temp.squeeze() # the output of mlab.csd has a weird\r\n # shape\r\n else:\r\n fxy = np.zeros((time_series.shape[0],\r\n time_series.shape[0],\r\n fxy_len), dtype=complex) # Make sure it's complex\r\n\r\n for i in range(time_series.shape[0]):\r\n for j in range(i, time_series.shape[0]):\r\n #Notice funny indexing, in order to conform to the\r\n #conventions of the other methods:\r\n temp, f = mlab.csd(time_series[j], time_series[i],\r\n NFFT, Fs, detrend, window, n_overlap,\r\n scale_by_freq=True)\r\n\r\n fxy[i][j] = temp.squeeze() # the output of mlab.csd has a\r\n # weird shape\r\n elif this_method in ('multi_taper_csd', 'periodogram_csd'):\r\n # these methods should work with similar signatures\r\n mdict = method.copy()\r\n func = eval(mdict.pop('this_method'))\r\n freqs, fxy = func(time_series, **mdict)\r\n f = utils.circle_to_hz(freqs, mdict.get('Fs', 2 * np.pi))\r\n\r\n else:\r\n raise ValueError(\"Unknown method provided\")\r\n\r\n return f, fxy.squeeze()",
"def convert_to_spectral_cube(self):\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Too many dimensions: Can only convert a \" +\n \"3D cube. Slice the cube before converting\")\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, 'Spectral axis needed to create a spectrum')\n axis = 0 if self.axes_wcs.wcs.ctype[-1] == 'WAVE' else 1\n coordaxes = [1, 2] if axis == 0 else [0, 2] # Non-spectral axes\n newwcs = wu.reindex_wcs(self.axes_wcs, np.arary(coordaxes))\n time_or_x_size = self.data.shape[coordaxes[1]]\n y_size = self.data.shape[coordaxes[0]]\n spectra = np.empty((time_or_x_size, y_size), dtype=Spectrum)\n for i in range(time_or_x_size):\n for j in range(y_size):\n spectra[i][j] = self.slice_to_spectrum(i, j)\n return SpectralCube(spectra, newwcs, self.meta)",
"def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num):\r\n Sxx = spectrogram_converter.mel_spectrogram(wav_path)\r\n for i in range(Sxx.shape[0]):\r\n for j in range(Sxx.shape[1]):\r\n X[index, 0, i, j] = Sxx[i, j]\r\n y[index] = curr_speaker_num\r\n return 1",
"def create_spectrogram(audio_file, sampling_rate = 44100):\n\n #print(audio_file)\n S, freqs, times = mlab.specgram(audio_file, NFFT=4096, Fs=sampling_rate,window=mlab.window_hanning,noverlap=2048)\n \n #print(S.shape)\n return S",
"def compute_melspec(y, params):\n melspec = librosa.feature.melspectrogram(\n y, sr=params.sr, n_mels=params.n_mels, fmin=params.fmin, fmax=params.fmax,\n )\n\n melspec = librosa.power_to_db(melspec).astype(np.float32)\n return melspec",
"def CalcSpectra(x,y, input=None, output=None):\n N = max(shape(x))\n x_fft = squeeze(fft(x, None, 0)*2/N)\n y_fft = squeeze(fft(y, None, 0)*2/N)\n Gxx = norm2(x_fft)\n Gyy = norm2(y_fft)\n Gxy = (scipy.conj(x_fft))*y_fft\n return Spectra(input, output, Gxx, Gyy, Gxy)",
"def reproject(self, y):\n return self.spectrogram(self.inv_signal(y).squeeze(1))",
"def test_spectrogram_method():\n fs = 10000\n N = 100000\n amp = 2 * np.sqrt(2)\n noise_power = 0.001 * fs / 2\n time = np.arange(N) / fs\n freq = np.linspace(1000, 2000, N)\n x = amp * chirp(time, 1000, 2.0, 6000, method=\"quadratic\") + np.random.normal(\n scale=np.sqrt(noise_power), size=time.shape\n )\n\n f, t, Sxx = spectrogram_lspopt(x, fs, c_parameter=20.0)\n f_sp, t_sp, Sxx_sp = spectrogram(x, fs)\n\n assert True",
"def plot_example_spectrograms(example,rate):\r\n plt.figure()\r\n \r\n ###YOUR CODE HERE\r\n y_lim = 40\r\n plt.title('Spectrogram')\r\n bin_space = 512 #30*rate # A typical window size is 30 seconds\r\n plt.subplot(411)\r\n plt.specgram(examples[0]/np.sum(examples[0]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.title ('REM')\r\n plt.subplot(412)\r\n plt.title ('Stage 1 NREM')\r\n plt.specgram(examples[1]/np.sum(examples[1]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.subplot(413)\r\n plt.title ('Stage 2 NREM')\r\n plt.specgram(examples[2]/np.sum(examples[2]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.subplot(414)\r\n plt.title ('Stage 3/4 NREM')\r\n plt.specgram(examples[3]/np.sum(examples[3]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.show();\r\n \r\n return",
"def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / (3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = _lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. 
only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()",
"def make_spectra(directory,frame):\n oober = st.short_oober(directory, frame=frame)\n #st.MakeVelocitySpectra(oober,frame)\n #st.MakeAccelSpectra(oober,frame)\n #st.MakeMagneticSpectra(oober,frame)\n st.MakeDensitySpectra(oober,frame)",
"def plt_spectrogram(X,win_length, hop_size, sample_rate, zoom_x=None, zoom_y=None,tick_labels='time-freq'):\n\n # Find the size of stft\n Nf,Nt=np.shape(X)\n\n # Compute the log magnitude spectrogram\n X=20*np.log10(np.abs(X))\n\n # Extract the first half of the spectrum for each time frame\n X=X[0:Nf/2]\n # Nf=np.shape(X)[0]\n #\n # # Generate time vector for plotting\n # times=(hop_size/float(sample_rate))*np.arange(Nt)\n #\n # # Generate frequency vector for plotting\n # freqs=(float(sample_rate)/win_length)*np.arange(Nf)\n #\n # # Generate time and frequency matrices for pcolormesh\n # times_matrix,freqs_matrix=np.meshgrid(times,freqs)\n # #\n # # Plot the log magnitude spectrogram\n # plt.title('Log magnitude spectrogram')\n # if tick_labels == 'bin-frame':\n # plt.pcolormesh(X)\n # plt.xlabel('Time-frame Number')\n # plt.ylabel('Frequency-bin Number')\n # else:\n # plt.pcolormesh(times_matrix,freqs_matrix,X)\n # plt.xlabel('Time (sec)')\n # plt.ylabel('Frequency (Hz)')\n #\n # # Zoom in on the plot if specified\n # if zoom_x is None and zoom_y is None:\n # plt.axis('tight')\n #\n # if zoom_x is not None:\n # plt.xlim(zoom_x)\n #\n # if zoom_y is not None:\n # plt.ylim(zoom_y)\n #\n return X",
"def generate_spectrum(self):\n matlab_method = self.matlab_mapper[self.matlab_script]\n n, dm, peak_locations, omega_res, n_shell, gamma_amp = matlab_method(float(self.n_max), float(self.n_max_s),\n float(self.num_channels), float(self.scale),\n float(self.omega_shift), float(self.dg),\n float(self.dgs), float(self.gamma_amp_factor),\n float(self.amp_factor), float(self.epsilon2),\n nargout=6)\n dm = [list(d) for d in dm]\n self.num_timesteps = len(dm[0])\n if type(peak_locations) == float:\n peak_locations = list([peak_locations])\n else:\n peak_locations = [list(p) for p in peak_locations]\n spectrum = Spectrum(n=n, dm=dm, peak_locations=peak_locations, n_shell=n_shell, gamma_amp=gamma_amp, **self.__dict__)\n return spectrum",
"def visualize_spectrogram(path, duration=None, offset=0, sr=44100, n_mels=128, n_fft=2048, hop_length=512):\n\n # Make a mel-scaled power (energy-squared) spectrogram\n y, sr = librosa.load(path, sr=sr, duration=duration, offset=offset)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n\n # Convert to log scale (dB)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n\n # song name\n _, _, _, _, song_name = path.split(\"/\")\n\n # Render output spectrogram in the console\n plt.figure(figsize=(12, 5))\n librosa.display.specshow(log_S, sr=sr, x_axis='time', y_axis='mel')\n plt.title('mel power spectrogram for ' + song_name)\n plt.colorbar(format='%+02.0f dB')\n plt.tight_layout()",
"def spectrogram(sig, window_size=256, step_scale=4, zer_pad=2, time_array=None,fft_type='fft', log=False, normalize=0, dc_cut=0, fft_shift=0,filtered=0, freq_mask=[]):\n\n # alias for sig length\n sig=np.concatenate((np.zeros(window_size/4),sig,np.zeros(window_size/4)))\n N = len(sig)\n\n # SFFT step size,\n step = int(window_size / step_scale)\n\n if time_array is not None:\n # beating frequency\n if len(time_array) == len(sig):\n beat_freq=eval_beat_freq(time_array,window_size=window_size,zer_pad=zer_pad,fft_shift=fft_shift)\n else:\n raise ValueError('length of time array and signal are different to evaluate spectrogram')\n\n # time array for spectrogram\n time_spec = np.linspace(time_array[window_size], time_array[-window_size], num=(N - window_size) / step)\n\n # creates the window function that will 'slide' trough the signal\n # to evaluate each FFT. Kaiser seems to be the cleanest one\n #window_func = np.hanning(window_size)\n #window_func = signal.tukey(window_size, 0.25)\n window_func = signal.kaiser(window_size, 10)\n\n # if not shifting, treats as if real signal\n factor=2\n if fft_shift:\n factor=1\n\n # create a matrix to receive the spectra\n mat_Y=window_size*zer_pad/factor\n #if len(freq_mask)!=0:\n # mat_Y=len(np.where(freq_mask)[0])\n matrix = np.empty(((N - window_size) / step, mat_Y))\n\n if filtered == 1:\n b, a, zi = _init_filter()\n\n # slide window trough signal, and evaluates the FFT.\n for i in range(int((N - window_size) / step)):\n t = i * step\n new_sig = sig[t:t + window_size]\n #print(len(new_sig))\n try:\n new_sig = np.multiply(new_sig, window_func)\n except ValueError:\n print(len(new_sig), i, t)\n if t < window_size:\n new_sig = np.multiply(new_sig, window_func[:len(new - sig)])\n elif t > window_size:\n new_sig = np.multiply(new_sig, window_func[-len(new - sig):])\n if fft_type == 'fft':\n fft_sig = fftpack.fft(new_sig, n=zer_pad * window_size)[:window_size*zer_pad]\n #fft_sig = fftpack.rfft(new_sig, zer_pad * window_size)[:window_size]\n elif fft_type == 'welch':\n freqs, fft_sig = signal.welch(new_sig, nfft=zer_pad * window_size*zer_pad)\n fft_sig = fft_sig[1:window_size]\n if dc_cut == True:\n fft_sig = np.concatenate(\n ([fft_sig[1]], fft_sig[1:-1], [fft_sig[1]]))\n if fft_shift == 1:\n fft_sig = np.fft.fftshift(fft_sig)\n else:\n fft_sig=fft_sig[:len(fft_sig)/factor]\n fft_sig=abs(fft_sig)\n # if len(freq_mask)!=0:\n # fft_sig[freq_mask == False] = np.nan\n # fft_sig=fft_sig[freq_mask]\n # if normalize == True:\n # fft_sig *= (1. / fft_sig.max())\n # if log == True:\n # fft_sig = np.log(fft_sig)\n if filtered == 1:\n fft_sig = _butter_filter(fft_sig, b, a, zi)\n if 0:\n import matplotlib.pyplot as plt\n plt.figure('sfft')\n plt.clf()\n print(i, t, t + window_size, len(sig[t:t + window_size]))\n plt.plot(sig[t:t + window_size], 'b',label='signal')\n plt.plot(window_func, 'k',label='window')\n plt.plot(new_sig, 'r',label='signal w/ window')\n plt.legend(loc='best')\n plt.twinx()\n plt.plot(fft_sig, 'c')\n plt.draw()\n input('')\n\n matrix[i] = fft_sig\n\n if len(freq_mask)!=0:\n matrix=matrix[:,freq_mask]\n\n if normalize == True:\n matrix /= matrix.max(axis=1)[:, None]\n\n if log == True:\n matrix = np.log(matrix)\n\n if time_array is not None:\n return matrix.transpose(), time_spec, beat_freq\n else:\n return matrix.transpose()",
"def make_sunpy(evtdata, hdr):\n\n\t# Parse Header keywords\n\tfor field in hdr.keys():\n\t\tif field.find('TYPE') != -1:\n\t\t\tif hdr[field] == 'X':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\txval = field[5:8]\n\t\t\tif hdr[field] == 'Y':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\tyval = field[5:8]\n\t\t\n\tmin_x= hdr['TLMIN'+xval]\n\tmin_y= hdr['TLMIN'+yval]\n\tmax_x= hdr['TLMAX'+xval]\n\tmax_y= hdr['TLMAX'+yval]\n\n\tdelx = hdr['TCDLT'+xval]\n\tdely = hdr['TCDLT'+yval]\n\n\tx = evtdata['X'][:]\n\ty = evtdata['Y'][:]\n\tmet = evtdata['TIME'][:]*u.s\n\tmjdref=hdr['MJDREFI']\n\tmid_obs_time = astropy.time.Time(mjdref*u.d+met.mean(), format = 'mjd')\n\n\t# Use the native binning for now\n\n\t# Assume X and Y are the same size\n\tresample = 1.0\n\tscale = delx * resample\n\tbins = (max_x - min_x) / (resample)\n\n\tH, yedges, xedges = np.histogram2d(y, x, bins=bins, range = [[min_y,max_y], [min_x, max_x]])\n\n\n\tdict_header = {\n\t\"DATE-OBS\": mid_obs_time.iso,\n\t\"CDELT1\": scale,\n\t\"NAXIS1\": bins,\n\t\"CRVAL1\": 0.,\n\t\"CRPIX1\": bins*0.5,\n\t\"CUNIT1\": \"arcsec\",\n\t\"CTYPE1\": \"HPLN-TAN\",\n\t\"CDELT2\": scale,\n\t\"NAXIS2\": bins,\n\t\"CRVAL2\": 0.,\n\t\"CRPIX2\": bins*0.5 + 0.5,\n\t\"CUNIT2\": \"arcsec\",\n\t\"CTYPE2\": \"HPLT-TAN\",\n\t\"HGLT_OBS\": 0,\n\t\"HGLN_OBS\": 0,\n\t\"RSUN_OBS\": sun.solar_semidiameter_angular_size(mid_obs_time).value,\n\t\"RSUN_REF\": sun.constants.radius.value,\n\t\"DSUN_OBS\": sun.sunearth_distance(mid_obs_time).value\n\t}\n\t# For some reason the DSUN_OBS crashed the save...\n\n\theader = sunpy.map.MapMeta(dict_header)\n\n\tnustar_map = sunpy.map.Map(H, header)\n\t\n\treturn nustar_map",
"def _make_test_cube(long_name):\n cs = GeogCS(EARTH_RADIUS)\n data = np.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]])\n cube = Cube(data, long_name=long_name)\n x_coord = DimCoord(\n np.linspace(-45.0, 45.0, 3), \"latitude\", units=\"degrees\", coord_system=cs\n )\n y_coord = DimCoord(\n np.linspace(120, 180, 3), \"longitude\", units=\"degrees\", coord_system=cs\n )\n cube.add_dim_coord(x_coord, 0)\n cube.add_dim_coord(y_coord, 1)\n return cube",
"def get_syls(cbin, spect_params, labels_to_use='all', syl_spect_width=-1):\n\n if labels_to_use != 'all':\n if type(labels_to_use) !=list and type(labels_to_use) != str:\n raise ValueError('labels_to_use argument should be a list or string')\n if type(labels_to_use) == str:\n labels_to_use = list(labels_to_use)\n\n dat, samp_freq = load_cbin(cbin)\n if samp_freq != spect_params['samp_freq']:\n raise ValueError(\n 'Sampling frequency for {}, {}, does not match expected sampling '\n 'frequency of {}'.format(cbin,\n samp_freq,\n spect_params['samp_freq']))\n\n notmat = load_notmat(cbin)\n onsets_Hz = np.round((notmat['onsets'] / 1000) * samp_freq).astype(int)\n offsets_Hz = np.round((notmat['offsets'] / 1000) * samp_freq).astype(int)\n if syl_spect_width > 0:\n syl_spect_width_Hz = np.round(syl_spect_width * samp_freq)\n\n all_labels = []\n all_syls = []\n\n for ind, (label, onset, offset) in enumerate(zip(notmat['labels'],onsets_Hz,offsets_Hz)):\n if 'syl_spect_width_Hz' in locals():\n syl_duration_in_samples = offset - onset\n if syl_duration_in_samples < syl_spect_width_Hz:\n raise ValueError('syllable duration of syllable {} with label {}'\n 'in file {} is greater than '\n 'width specified for all syllable spectrograms.'\n .format(ind,label,cbin))\n\n if labels_to_use == 'all':\n label = None\n elif label not in labels_to_use:\n continue\n all_labels.append(label)\n\n if 'syl_spect_width_Hz' in locals():\n width_diff = syl_spect_width_Hz - syl_duration_in_samples\n # take half of difference between syllable duration and spect width\n # so one half of 'empty' area will be on one side of spect\n # and the other half will be on other side\n # i.e., center the spectrogram\n left_width = int(round(width_diff / 2))\n right_width = width_diff - left_width\n if left_width > onset: # if duration before onset is less than left_width\n # (could happen with first onset)\n left_width = 0\n right_width = width_diff - offset\n elif offset + right_width > dat.shape[-1]:\n # if right width greater than length of file\n right_width = dat.shape[-1] - offset\n left_width = width_diff - right_width\n syl_audio = dat[:, onset - left_width:\n offset + right_width]\n else:\n syl_audio = dat[onset:offset]\n syllable = hvc.audiofileIO.make_syl_spect(syl_audio,\n samp_freq,\n nfft=spect_params['nperseg'],\n overlap=spect_params['noverlap'],\n freq_cutoffs = spect_params['freq_cutoffs'][0])\n all_syls.append(syllable)\n\n return all_syls, all_labels",
"def full_spectral_helper(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',\n scale_by_freq=None):\n # The checks for if y is x are so that we can use the same function to\n #implement the core of psd(), csd(), and spectrogram() without doing\n #extra calculations. We return the unaveraged Pxy, freqs, and t.\n same_data = y is x\n\n #Make sure we're dealing with a numpy array. If y and x were the same\n #object to start with, keep them that way\n x = np.asarray(x)\n if not same_data:\n y = np.asarray(y)\n else:\n y = x\n\n # zero pad x and y up to NFFT if they are shorter than NFFT\n if len(x) < NFFT:\n n = len(x)\n x = np.resize(x, (NFFT,))\n x[n:] = 0\n\n if not same_data and len(y) < NFFT:\n n = len(y)\n y = np.resize(y, (NFFT,))\n y[n:] = 0\n\n if pad_to is None:\n pad_to = NFFT\n\n if scale_by_freq is None:\n scale_by_freq = True\n\n # For real x, ignore the negative frequencies unless told otherwise\n if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':\n numFreqs = pad_to\n scaling_factor = 1.\n elif sides in ('default', 'onesided'):\n numFreqs = pad_to // 2 + 1\n scaling_factor = 2.\n else:\n raise ValueError(\"sides must be one of: 'default', 'onesided', or \"\n \"'twosided'\")\n\n if cbook.iterable(window):\n assert (len(window) == NFFT)\n windowVals = window\n else:\n windowVals = window(np.ones((NFFT,), x.dtype))\n\n step = NFFT - noverlap\n ind = np.arange(0, len(x) - NFFT + 1, step)\n n = len(ind)\n Pxx = np.zeros((numFreqs, n), np.float_)\n Pyy = np.zeros((numFreqs, n), np.float_)\n Pxy = np.zeros((numFreqs, n), np.complex_)\n\n # do the ffts of the slices\n for i in range(n):\n thisX = x[ind[i]:ind[i] + NFFT]\n thisX = windowVals * detrend(thisX)\n fx = np.fft.fft(thisX, n=pad_to)\n\n if same_data:\n fy = fx\n else:\n thisY = y[ind[i]:ind[i] + NFFT]\n thisY = windowVals * detrend(thisY)\n fy = np.fft.fft(thisY, n=pad_to)\n Pxy[:, i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]\n Pxx[:, i] = np.conjugate(fx[:numFreqs]) * fx[:numFreqs]\n Pyy[:, i] = np.conjugate(fy[:numFreqs]) * fy[:numFreqs]\n\n # Scale the spectrum by the norm of the window to compensate for\n # windowing loss; see Bendat & Piersol Sec 11.5.2.\n Pxy /= (np.abs(windowVals) ** 2).sum()\n Pxx /= (np.abs(windowVals) ** 2).sum()\n Pyy /= (np.abs(windowVals) ** 2).sum()\n\n # Also include scaling factors for one-sided densities and dividing by the\n # sampling frequency, if desired. Scale everything, except the DC component\n # and the NFFT/2 component:\n Pxy[1:-1] *= scaling_factor\n Pxx[1:-1] *= scaling_factor\n Pyy[1:-1] *= scaling_factor\n\n # MATLAB divides by the sampling frequency so that density function\n # has units of dB/Hz and can be integrated by the plotted frequency\n # values. Perform the same scaling here.\n if scale_by_freq:\n Pxy /= Fs\n Pyy /= Fs\n Pxx /= Fs\n\n t = 1. / Fs * (ind + NFFT / 2.)\n freqs = float(Fs) / pad_to * np.arange(numFreqs)\n\n if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':\n # center the frequency range at zero\n freqs = np.concatenate((freqs[numFreqs // 2:] - Fs, freqs[:numFreqs // 2]))\n Pxy = np.concatenate((Pxy[numFreqs // 2:, :], Pxy[:numFreqs // 2, :]), 0)\n Pxx = np.concatenate((Pxx[numFreqs // 2:, :], Pxx[:numFreqs // 2, :]), 0)\n Pyy = np.concatenate((Pyy[numFreqs // 2:, :], Pyy[:numFreqs // 2, :]), 0)\n\n return Pxx, Pyy, Pxy, freqs, t",
"def extract_spectrogram(path):\n y, _ = librosa.load(path, sr=Config.sample_rate)\n\n # Remove leading and trailing silence\n y, _ = librosa.effects.trim(y)\n\n # Preemphasis (upscale frequencies and downscale them later to reduce noise)\n y = np.append(y[0], y[1:] - Config.preemphasis*y[:-1])\n\n # Convert the waveform to a complex spectrogram by a short-time Fourier transform\n linear = librosa.stft(y=y, n_fft=Config.num_fft_samples, hop_length=Config.hop_length,\n win_length=Config.window_length)\n\n # Only consider the magnitude of the spectrogram\n mag = np.abs(linear)\n\n # Compute the mel spectrogram\n mel_basis = librosa.filters.mel(Config.sample_rate, Config.num_fft_samples, Config.mel_size)\n mel = np.dot(mel_basis, mag)\n\n # To decibel\n mel = 20 * np.log10(np.maximum(1e-5, mel))\n mag = 20 * np.log10(np.maximum(1e-5, mag))\n\n # Normalize\n mel = np.clip((mel - Config.ref_db + Config.max_db) / Config.max_db, 1e-8, 1)\n mag = np.clip((mag - Config.ref_db + Config.max_db) / Config.max_db, 1e-8, 1)\n\n # Transpose\n mel = mel.T.astype(np.float32)\n mag = mag.T.astype(np.float32)\n\n return mel, mag",
"def plot_hypnogram(eeg, stages, srate):\r\n \r\n fig,ax1 = plt.subplots() #Needed for the multiple y-axes\r\n \r\n #Use the specgram function to draw the spectrogram as usual\r\n ax1.specgram(eeg, NFFT=256, Fs=srate)\r\n\r\n #Label your x and y axes and set the y limits for the spectrogram\r\n ax1.set_xlabel('Time (seconds)')\r\n ax1.set_ylabel('Frequency (Hz)')\r\n ax1.set_ylim(ymax=30)\r\n \r\n ax2 = ax1.twinx() #Necessary for multiple y-axes\r\n \r\n #Use ax2.plot to draw the hypnogram. Be sure your x values are in seconds\r\n #HINT: Use drawstyle='steps' to allow step functions in your plot\r\n i = 0\r\n bin_size = 30*srate\r\n c = np.zeros(len(eeg)/bin_size)\r\n while i + bin_size < len(eeg):\r\n c[i/bin_size] = classify_epoch(eeg[range(i,i+bin_size)],srate)\r\n i = i + bin_size\r\n \r\n xx = range(0, c.size*30, 30)\r\n ax2.plot(xx,c, drawstyle='steps')\r\n ax2.set_xlim(xmax=3000) #max=3000 for Test, max=3600 for Practice\r\n\r\n #Label your right y-axis and change the text color to match your plot\r\n ax2.set_ylabel('NREM Stage',color='b')\r\n ax2.set_ylim(0.5,3.5)\r\n \r\n #Set the limits for the y-axis \r\n \r\n #Only display the possible values for the stages\r\n ax2.set_yticks(np.arange(1,4))\r\n \r\n #Change the left axis tick color to match your plot\r\n for t1 in ax2.get_yticklabels():\r\n t1.set_color('b')\r\n \r\n #Title your plot \r\n plt.title('Hypnogram and Spectogram for Test Data')\r\n \r\n plt.show()",
"def spectrum(syst, x, y=None, params=None, mask=None, file=None,\n show=True, dpi=None, fig_size=None, ax=None):\n\n if not mpl_available:\n raise RuntimeError(\"matplotlib was not found, but is required \"\n \"for plot_spectrum()\")\n if y is not None and not has3d:\n raise RuntimeError(\"Installed matplotlib does not support 3d plotting\")\n\n if isinstance(syst, system.FiniteSystem):\n def ham(**kwargs):\n return syst.hamiltonian_submatrix(params=kwargs, sparse=False)\n elif callable(syst):\n ham = syst\n else:\n raise TypeError(\"Expected 'syst' to be a finite Kwant system \"\n \"or a function.\")\n\n params = params or dict()\n keys = (x[0],) if y is None else (x[0], y[0])\n array_values = (x[1],) if y is None else (x[1], y[1])\n\n # calculate spectrum on the grid of points\n spectrum = []\n bound_ham = functools.partial(ham, **params)\n for point in itertools.product(*array_values):\n p = dict(zip(keys, point))\n if mask and mask(**p):\n spectrum.append(None)\n else:\n h_p = np.atleast_2d(bound_ham(**p))\n spectrum.append(np.linalg.eigvalsh(h_p))\n # massage masked grid points into a list of NaNs of the appropriate length\n n_eigvals = len(next(filter(lambda s: s is not None, spectrum)))\n nan_list = [np.nan] * n_eigvals\n spectrum = [nan_list if s is None else s for s in spectrum]\n # make into a numpy array and reshape\n new_shape = [len(v) for v in array_values] + [-1]\n spectrum = np.array(spectrum).reshape(new_shape)\n\n # set up axes\n if ax is None:\n fig = _make_figure(dpi, fig_size, use_pyplot=(file is None))\n if y is None:\n ax = fig.add_subplot(1, 1, 1)\n else:\n warnings.filterwarnings('ignore',\n message=r'.*mouse rotation disabled.*')\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n warnings.resetwarnings()\n ax.set_xlabel(keys[0])\n if y is None:\n ax.set_ylabel('Energy')\n else:\n ax.set_ylabel(keys[1])\n ax.set_zlabel('Energy')\n ax.set_title(', '.join('{} = {}'.format(*kv) for kv in params.items()))\n else:\n fig = None\n\n # actually do the plot\n if y is None:\n ax.plot(array_values[0], spectrum)\n else:\n if not hasattr(ax, 'plot_surface'):\n msg = (\"When providing an axis for plotting over a 2D domain the \"\n \"axis should be created with 'projection=\\\"3d\\\"\")\n raise TypeError(msg)\n # plot_surface cannot directly handle rank-3 values, so we\n # explicitly loop over the last axis\n grid = np.meshgrid(*array_values)\n for i in range(spectrum.shape[-1]):\n spec = spectrum[:, :, i].transpose() # row-major to x-y ordering\n ax.plot_surface(*(grid + [spec]), cstride=1, rstride=1)\n\n _maybe_output_fig(fig, file=file, show=show)\n\n return fig"
]
| [
"0.62373495",
"0.61111325",
"0.5878095",
"0.5321949",
"0.52246",
"0.52145845",
"0.51144654",
"0.50932705",
"0.5055656",
"0.50293887",
"0.50188696",
"0.50037694",
"0.49115393",
"0.4873337",
"0.4850232",
"0.48383042",
"0.48147768",
"0.4789717",
"0.4733066",
"0.4707515",
"0.4701444",
"0.46674123",
"0.4662637",
"0.46607566",
"0.46445313",
"0.46355072",
"0.46288314",
"0.46188608",
"0.46098733",
"0.46072063"
]
| 0.6674686 | 0 |
For a hypercube, return a 3D cube that has been cut along the given axis, with data corresponding to the given chunk. | def slice_to_cube(self, axis, chunk, **kwargs):
if self.data.ndim == 3:
raise cu.CubeError(4, 'Can only slice a hypercube into a cube')
item = [slice(None, None, None) for _ in range(4)]
if isinstance(chunk, tuple):
if cu.iter_isinstance(chunk, (u.Quantity, u.Quantity)):
pixel0 = cu.convert_point(chunk[0].value, chunk[0].unit,
self.axes_wcs, axis)
pixel1 = cu.convert_point(chunk[1].value, chunk[1].unit,
self.axes_wcs, axis)
item[axis] = slice(pixel0, pixel1, None)
            elif cu.iter_isinstance(chunk, (int, int)):
item[axis] = slice(chunk[0], chunk[1], None)
else:
raise cu.CubeError(5, "Parameters must be of the same type")
newdata = self.data[item].sum(axis)
else:
unit = chunk.unit if isinstance(chunk, u.Quantity) else None
pixel = cu.convert_point(chunk, unit, self.axes_wcs, axis)
item[axis] = pixel
newdata = self.data[item]
wcs_indices = [0, 1, 2, 3]
wcs_indices.remove(3 - axis)
newwcs = wu.reindex_wcs(self.axes_wcs, np.array(wcs_indices))
if axis == 2 or axis == 3:
newwcs = wu.add_celestial_axis(newwcs)
newwcs.was_augmented = True
cube = Cube(newdata, newwcs, meta=self.meta, **kwargs)
return cube | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def explode_along_axis(self, axis):\n # If axis is -ve then calculate the axis from the length of the dimensions of one cube\n if axis < 0:\n axis = len(self.dimensions) + axis\n # To store the resultant cube\n result_cubes = []\n # All slices are initially initialised as slice(None, None, None)\n cube_slices = [slice(None, None, None)] * self.data.ndim\n # Slicing the cube inside result_cube\n for i in range(self.data.shape[axis]):\n # Setting the slice value to the index so that the slices are done correctly.\n cube_slices[axis] = i\n # Set to None the metadata of sliced cubes.\n item = tuple(cube_slices)\n sliced_cube = self[item]\n sliced_cube.meta = None\n # Appending the sliced cubes in the result_cube list\n result_cubes.append(sliced_cube)\n # Creating a new NDCubeSequence with the result_cubes and common axis as axis\n return NDCubeSequence(result_cubes, meta=self.meta)",
"def split_cube(cube):\n # Split first axis\n ax0 = cube[0]\n ax0 -= 0.5\n ax0[ax0 < 0] = ax0[ax0 < 0] * 2 - 1\n ax0[ax0 > 0] = ax0[ax0 > 0] * 2\n if cube.shape[0] > 1:\n # Scale other axes to be in a useful range for floor divide\n cube[1:] = cube[1:] * 4\n # Define the shifts\n displace = cube[1:].floor() % 2\n shift = displace[0]\n # We need an algebra that satisies: 1 * 0 = 0, 1 * 1 = 1, 0 * 1 = 0, 0 * 0 = 1\n # This is achieved with * = (==)\n for ax in displace[1:]:\n shift = shift == ax\n ax0 += shift\n cube[1:] -= 2\n cube *= 2\n return cube.t()",
"def getCube(unique_name):",
"def nurbsCube(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n degree: Union[int, bool]=3, heightRatio: Union[float, bool]=1.0, lengthRatio:\n Union[float, bool]=1.0, nodeState: Union[int, bool]=0, patchesU: Union[int,\n bool]=1, patchesV: Union[int, bool]=1, pivot: Union[List[float, float, float],\n bool]=None, width: Union[float, bool]=1.0, constructionHistory: bool=True, name:\n AnyStr=\"\", object: bool=True, polygon: int=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def slice_in_3d(axis, shape, plane):\n Z = np.array([[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 1, 1]])\n\n Z = Z * shape\n\n r = [-1, 1]\n\n X, Y = np.meshgrid(r, r)\n\n # plotting vertices\n axis.scatter3D(Z[:, 0], Z[:, 1], Z[:, 2])\n\n # list of sides' polygons of figure\n verts = [[Z[0], Z[1], Z[2], Z[3]],\n [Z[4], Z[5], Z[6], Z[7]],\n [Z[0], Z[1], Z[5], Z[4]],\n [Z[2], Z[3], Z[7], Z[6]],\n [Z[1], Z[2], Z[6], Z[5]],\n [Z[4], Z[7], Z[3], Z[0]],\n [Z[2], Z[3], Z[7], Z[6]]]\n\n # plotting sides\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors=(0, 1, 1, 0.25),\n linewidths=1,\n edgecolors='darkblue')\n )\n\n verts = np.array([[[0, 0, 0],\n [0, 0, 1],\n [0, 1, 1],\n [0, 1, 0]]])\n verts = verts * shape\n verts += [plane, 0, 0]\n\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors='magenta',\n linewidths=1,\n edgecolors='black')\n )\n\n axis.set_xlabel('plane')\n axis.set_ylabel('col')\n axis.set_zlabel('row')\n\n # auto-scale plot axes\n scaling = np.array([getattr(axis, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n axis.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)\n\n return None",
"def index_as_cube(self):\n return _IndexAsCubeSlicer(self)",
"def Cube(center=(0.0, 0.0, 0.0), x_length=1.0, y_length=1.0, z_length=1.0, bounds=None, clean=True):\n src = _vtk.vtkCubeSource()\n if bounds is not None:\n if np.array(bounds).size != 6:\n raise TypeError(\n 'Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)'\n )\n src.SetBounds(bounds)\n else:\n src.SetCenter(center)\n src.SetXLength(x_length)\n src.SetYLength(y_length)\n src.SetZLength(z_length)\n src.Update()\n cube = wrap(src.GetOutput())\n\n # add face index data for compatibility with PlatonicSolid\n # but make it inactive for backwards compatibility\n cube.cell_data.set_array([1, 4, 0, 3, 5, 2], 'FaceIndex')\n\n # clean duplicate points\n if clean:\n cube.clean(inplace=True)\n\n return cube",
"def read_cube(infile, allow_huge=True):\n\n if allow_huge:\n casaStuff.exportfits(imagename=infile,\n fitsimage=infile + '.fits',\n stokeslast=False, overwrite=True)\n hdu = pyfits.open(infile + '.fits')[0]\n cube = hdu.data.T\n\n # Remove intermediate fits file\n os.system('rm -rf ' + infile + '.fits')\n else:\n myia = au.createCasaTool(casaStuff.iatool)\n myia.open(infile)\n cube = myia.getchunk()\n myia.close()\n\n return cube",
"def transform_cube(self,\n cube: xr.Dataset,\n gm: GridMapping,\n cube_config: CubeConfig) -> TransformedCube:\n return cube, gm, cube_config",
"def transform_cube(self,\n cube: xr.Dataset,\n gm: GridMapping,\n cube_config: CubeConfig) -> TransformedCube:",
"def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume",
"def mat_2d_to_3d(x, agg_num, hop):\n # Pad to at least one block. \n len_x, n_in = x.shape\n if (len_x < agg_num):\n x = np.concatenate((x, np.zeros((agg_num - len_x, n_in))))\n \n # Segment 2d to 3d. \n len_x = len(x)\n i1 = 0\n x3d = []\n while (i1 + agg_num <= len_x):\n x3d.append(x[i1 : i1 + agg_num])\n i1 += hop\n return np.array(x3d)",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False",
"def polyCube(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n constructionHistory: bool=True, createUVs: Union[int, bool]=3, depth: Union[float,\n bool]=1.0, height: Union[float, bool]=1.0, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, object: bool=True, subdivisionsDepth: Union[int, bool]=1,\n subdivisionsHeight: Union[int, bool]=1, subdivisionsWidth: Union[int, bool]=1,\n subdivisionsX: Union[int, bool]=1, subdivisionsY: Union[int, bool]=1,\n subdivisionsZ: Union[int, bool]=1, texture: Union[int, bool]=1, width: Union[float,\n bool]=1.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr],\n Any]:\n pass",
"def define_cube_slice(grid_input: list) -> dict:\n pad = (20 - len(grid_input[0])) // 2\n blank_grid = [[\".\"] * 20] * 20\n grid_output = [[\".\"] * 20] * 6\n for line in grid_input:\n line = [\".\"] * pad + line.split() + [\".\"] * pad\n if len(line) % 2 == 1:\n line.append(\".\")\n grid_output.append(line)\n grid_output += [[\".\"] * 20] * 6\n big_cube = {}\n for i in range(0, 21):\n big_cube[i] = blank_grid\n big_cube[10] = grid_output\n return big_cube",
"def _get_cube(var_type, group_by_attribute, attr, datasets):\n key = _get_key(var_type, attr)\n logger.info(\"Found the following datasets for '%s':\\n%s\", key,\n pformat([d['filename'] for d in datasets]))\n if 'error' in var_type:\n logger.debug(\"Calculating cube for '%s' by squared error aggregation\",\n key)\n ref_cube = iris.load_cube(datasets[0]['filename'])\n cube = mlr.get_squared_error_cube(ref_cube, datasets)\n mlr.square_root_metadata(cube)\n cube.data = np.ma.sqrt(cube.data)\n else:\n if len(datasets) != 1:\n raise ValueError(f\"Expected exactly one dataset for '{key}' got \"\n f\"{len(datasets):d}:\\n\"\n f\"{pformat([d['filename'] for d in datasets])}\")\n cube = iris.load_cube(datasets[0]['filename'])\n dataset_names = sorted(list({d['dataset'] for d in datasets}))\n end_years = list({d['end_year'] for d in datasets})\n filenames = sorted(list({d['filename'] for d in datasets}))\n projects = sorted(list({d['project'] for d in datasets}))\n start_years = list({d['start_year'] for d in datasets})\n cube.attributes.update({\n 'dataset': '|'.join(dataset_names),\n 'end_year': max(end_years),\n 'filename': '|'.join(filenames),\n 'project': '|'.join(projects),\n 'start_year': min(start_years),\n 'tag': datasets[0]['tag'],\n 'var_type': var_type,\n })\n if attr is not None:\n cube.attributes[group_by_attribute] = attr\n if cube.coords('time', dim_coords=True):\n ih.unify_time_coord(cube)\n return cube",
"def match_det2cube(x, y, sliceno, start_slice, input_model, transform,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n xcoord, zcoord,\n crval1, crval3, cdelt1, cdelt3, naxis1, naxis2):\n nxc = len(xcoord)\n nzc = len(zcoord)\n\n\n sliceno_use = sliceno - start_slice + 1\n# 1-1 mapping in beta\n yy = sliceno_use - 1\n\n pixel_dq = input_model.dq[y, x]\n\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(pixel_dq, all_flags) == 0))\n\n # good data holds the location of pixels we want to map to cube\n x = x[good_data]\n y = y[good_data]\n\n #center of first pixel, x,y = 1 for Adrian's equations\n # but we want the pixel corners, x,y values passed into this routine start at 0\n pixel_flux = input_model.data[y, x]\n\n yy_bot = y\n yy_top = y + 1\n xx_left = x\n xx_right = x + 1\n\n alpha, beta, lam = transform(x, y)\n alpha1, beta1, lam1 = transform(xx_left, yy_bot)\n alpha2, beta2, lam2 = transform(xx_right, yy_bot)\n alpha3, beta3, lam3 = transform(xx_right, yy_top)\n alpha4, beta4, lam4 = transform(xx_left, yy_top)\n\n nn = len(x)\n # Loop over all pixels in slice\n for ipixel in range(0, nn - 1):\n\n # detector pixel -> 4 corners\n # In alpha,wave space\n # in beta space: beta center + width\n\n alpha_corner = []\n wave_corner = []\n\n alpha_corner.append(alpha1[ipixel])\n alpha_corner.append(alpha2[ipixel])\n alpha_corner.append(alpha3[ipixel])\n alpha_corner.append(alpha4[ipixel])\n\n wave_corner.append(lam1[ipixel])\n wave_corner.append(lam2[ipixel])\n wave_corner.append(lam3[ipixel])\n wave_corner.append(lam4[ipixel])\n\n#________________________________________________________________________________\n# Now it does not matter the WCS method used\n alpha_min = min(alpha_corner)\n alpha_max = max(alpha_corner)\n wave_min = min(wave_corner)\n wave_max = max(wave_corner)\n #_______________________________________________________________________\n\n Area = FindAreaQuad(alpha_min, wave_min, alpha_corner, wave_corner)\n\n # estimate the where the pixel overlaps in the cube\n # find the min and max values in the cube xcoord,ycoord and zcoord\n\n MinA = (alpha_min - crval1) / cdelt1\n MaxA = (alpha_max - crval1) / cdelt1\n ix1 = max(0, int(math.trunc(MinA)))\n ix2 = int(math.ceil(MaxA))\n if ix2 >= nxc:\n ix2 = nxc - 1\n\n MinW = (wave_min - crval3) / cdelt3\n MaxW = (wave_max - crval3) / cdelt3\n iz1 = int(math.trunc(MinW))\n iz2 = int(math.ceil(MaxW))\n if iz2 >= nzc:\n iz2 = nzc - 1\n #_______________________________________________________________________\n # loop over possible overlapping cube pixels\n# noverlap = 0\n nplane = naxis1 * naxis2\n\n for zz in range(iz1, iz2 + 1):\n zcenter = zcoord[zz]\n istart = zz * nplane\n\n for xx in range(ix1, ix2 + 1):\n cube_index = istart + yy * naxis1 + xx #yy = slice # -1\n xcenter = xcoord[xx]\n AreaOverlap = SH_FindOverlap(xcenter, zcenter,\n cdelt1, cdelt3,\n alpha_corner, wave_corner)\n\n if AreaOverlap > 0.0:\n AreaRatio = AreaOverlap / Area\n spaxel_flux[cube_index] = spaxel_flux[cube_index] + (AreaRatio * pixel_flux[ipixel])\n spaxel_weight[cube_index] = spaxel_weight[cube_index] + AreaRatio\n spaxel_iflux[cube_index] = spaxel_iflux[cube_index] + 1",
"def _get_cube(datasets, short_name):\n datasets = select_metadata(datasets, short_name=short_name)\n if len(datasets) != 1:\n raise ValueError(\n f\"Expected exactly one dataset with short_name '{short_name}', \"\n f\"got {len(datasets):d}:\\n{datasets}\")\n return iris.load_cube(datasets[0]['filename'],\n NameConstraint(var_name=short_name))",
"def chunk(cube,\n output,\n format=None,\n params=None,\n chunks=None,\n quiet=None,\n verbosity=None):\n configure_cli_output(quiet=quiet, verbosity=verbosity)\n\n chunk_sizes = None\n if chunks:\n chunk_sizes = parse_cli_kwargs(chunks, metavar=\"CHUNKS\")\n for k, v in chunk_sizes.items():\n if not isinstance(v, int) or v <= 0:\n raise click.ClickException(\"Invalid value for CHUNKS, \"\n f\"chunk sizes must be positive integers: {chunks}\")\n\n write_kwargs = dict()\n if params:\n write_kwargs = parse_cli_kwargs(params, metavar=\"PARAMS\")\n\n from xcube.core.chunk import chunk_dataset\n from xcube.core.dsio import guess_dataset_format\n from xcube.core.dsio import open_dataset, write_dataset\n\n format_name = format if format else guess_dataset_format(output)\n\n with open_dataset(input_path=cube) as ds:\n if chunk_sizes:\n for k in chunk_sizes:\n if k not in ds.dims:\n raise click.ClickException(\"Invalid value for CHUNKS, \"\n f\"{k!r} is not the name of any dimension: {chunks}\")\n\n chunked_dataset = chunk_dataset(ds, chunk_sizes=chunk_sizes, format_name=format_name)\n write_dataset(chunked_dataset, output_path=output, format_name=format_name, **write_kwargs)",
"def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube",
"def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]",
"def cylinder(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n degree: Union[int, bool]=3, endSweep: Union[float, bool]=2, heightRatio:\n Union[float, bool]=2.0, nodeState: Union[int, bool]=0, pivot: Union[List[float,\n float, float], bool]=None, radius: Union[float, bool]=1.0, sections: Union[int,\n bool]=8, spans: Union[int, bool]=1, startSweep: Union[float, bool]=0, tolerance:\n Union[float, bool]=0.01, useTolerance: bool=False, constructionHistory: bool=True,\n name: AnyStr=\"\", object: bool=True, polygon: int=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def init_reshape(cube, nside):\n half_nside = 2**nside / 2\n \n dim1 = cube.shape[1]/2 - half_nside\n dim2 = cube.shape[1]/2 + half_nside\n dim3 = cube.shape[2]/2 - half_nside\n dim4 = cube.shape[2]/2 + half_nside\n\n return cube[:, dim1:dim2, dim3:dim4]",
"def fix_data(self, cube: Cube) -> Cube:\n return cube",
"def slice_to_map(self, chunk, snd_dim=None, *args, **kwargs):\n if self.axes_wcs.wcs.ctype[1] == 'WAVE' and self.data.ndim == 3:\n error = \"Cannot construct a map with only one spatial dimension\"\n raise cu.CubeError(3, error)\n if isinstance(chunk, tuple):\n item = slice(cu.pixelize(chunk[0], self.axes_wcs, -1),\n cu.pixelize(chunk[1], self.axes_wcs, -1), None)\n maparray = self.data[item].sum(0)\n else:\n maparray = self.data[cu.pixelize(chunk, self.axes_wcs, -1)]\n\n if self.data.ndim == 4:\n if snd_dim is None:\n error = \"snd_dim must be given when slicing hypercubes\"\n raise cu.CubeError(4, error)\n\n if isinstance(snd_dim, tuple):\n item = slice(cu.pixelize(snd_dim[0], self.axes_wcs, -1),\n cu.pixelize(snd_dim[1], self.axes_wcs, -1), None)\n maparray = maparray[item].sum(0)\n else:\n maparray = maparray[cu.pixelize(snd_dim, self.axes_wcs, -1)]\n\n mapheader = MetaDict(self.meta)\n gmap = GenericMap(data=maparray, header=mapheader, *args, **kwargs)\n return gmap",
"def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)",
"def createCube():\n subjects, detections, antigen = getAxes()\n cube = np.full([len(subjects), len(detections), len(antigen)], np.nan)\n\n IGG = importIGG()\n glycan, dfGlycan = importGlycan()\n glyCube = np.full([len(subjects), len(glycan)], np.nan)\n\n for k, curAnti in enumerate(antigen):\n lumx = importLuminex(curAnti)\n\n for _, row in lumx.iterrows():\n i = subjects.index(row[\"subject\"])\n j = detections.index(row[\"variable\"])\n cube[i, j, k] = row[\"value\"]\n\n for _, row in dfGlycan.iterrows():\n i = subjects.index(row[\"subject\"])\n j = glycan.index(row[\"variable\"])\n glyCube[i, j] = row[\"value\"]\n\n # Add IgG data on the end as another detection\n for _, row in IGG.iterrows():\n i = subjects.index(row[\"subject\"])\n k = antigen.index(row[\"variable\"])\n cube[i, -1, k] = row[\"value\"]\n\n # Clip to 0 as there are a few strongly negative outliers\n cube = np.clip(cube, 1.0, None)\n glyCube = np.clip(glyCube, 0.1, None)\n\n cube = np.log10(cube)\n glyCube = np.log10(glyCube)\n\n # Mean center each measurement\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n cube -= np.nanmean(cube, axis=0)\n glyCube -= np.nanmean(glyCube, axis=0)\n\n # Check that there are no slices with completely missing data\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 1)))\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 2)))\n assert ~np.any(np.all(np.isnan(cube), axis=(1, 2)))\n\n glyCube *= np.sqrt(np.nanvar(cube) / np.nanvar(glyCube))\n return cube, glyCube",
"def split_3Darray(array2d, L_window):\n N, ch = array2d.shape\n n_windows = N//L_window\n array3d = np.zeros((n_windows, L_window, ch))\n for i in range(n_windows):\n array3d[i]=array2d[i*L_window: (i+1)*L_window,:] \n \n return array3d",
"def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)"
]
| [
"0.63870597",
"0.61239403",
"0.60539335",
"0.5832195",
"0.57542986",
"0.56327647",
"0.559867",
"0.5501042",
"0.5500518",
"0.5481983",
"0.5481307",
"0.54722977",
"0.5436329",
"0.53984386",
"0.53860784",
"0.53848904",
"0.53785336",
"0.5359201",
"0.5354345",
"0.5327404",
"0.5268032",
"0.5249178",
"0.52241427",
"0.5216195",
"0.5194057",
"0.5178005",
"0.5140795",
"0.5137084",
"0.51272506",
"0.5118619"
]
| 0.83219737 | 0 |
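The record above pairs a docstring-style query with the `slice_to_cube` implementation. As a hedged aside, the indexing pattern that implementation relies on (a list of `slice(None)` placeholders with one entry swapped for either a single index or a pixel range that is then summed) can be reproduced with plain NumPy; the array shape and axis below are invented for illustration and are not part of the dataset.

```python
import numpy as np

# Hypothetical 4D hypercube: (time, wavelength, y, x); values are arbitrary.
hypercube = np.arange(2 * 3 * 4 * 5, dtype=float).reshape(2, 3, 4, 5)
axis = 1  # cut along the wavelength axis

# Single-pixel chunk: pick one plane along `axis`, leaving a 3D cube.
item = [slice(None)] * hypercube.ndim
item[axis] = 2
cube_single = hypercube[tuple(item)]            # shape (2, 4, 5)

# Range chunk: take pixels 0..1 along `axis` and sum over that axis,
# mirroring the tuple branch's `self.data[item].sum(axis)`.
item[axis] = slice(0, 2)
cube_summed = hypercube[tuple(item)].sum(axis)  # shape (2, 4, 5)

print(cube_single.shape, cube_summed.shape)
```

Note that the sketch indexes with `tuple(item)` rather than the bare list used in the record's code, since list-of-slices indexing is deprecated in current NumPy.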
Converts this cube into a SpectralCube. It will only work if the cube has exactly three dimensions and one of those is a spectral axis. | def convert_to_spectral_cube(self):
if self.data.ndim == 4:
raise cu.CubeError(4, "Too many dimensions: Can only convert a " +
"3D cube. Slice the cube before converting")
if 'WAVE' not in self.axes_wcs.wcs.ctype:
raise cu.CubeError(2, 'Spectral axis needed to create a spectrum')
axis = 0 if self.axes_wcs.wcs.ctype[-1] == 'WAVE' else 1
coordaxes = [1, 2] if axis == 0 else [0, 2] # Non-spectral axes
        newwcs = wu.reindex_wcs(self.axes_wcs, np.array(coordaxes))
time_or_x_size = self.data.shape[coordaxes[1]]
y_size = self.data.shape[coordaxes[0]]
spectra = np.empty((time_or_x_size, y_size), dtype=Spectrum)
for i in range(time_or_x_size):
for j in range(y_size):
spectra[i][j] = self.slice_to_spectrum(i, j)
return SpectralCube(spectra, newwcs, self.meta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spectral_model(self):\n d = self.data\n spec_type = d['SpectrumType'].strip()\n pars, errs = {}, {}\n pars['amplitude'] = d['Flux_Density']\n errs['amplitude'] = d['Unc_Flux_Density']\n pars['reference'] = d['Pivot_Energy']\n\n if spec_type == 'PowerLaw':\n pars['index'] = d['Spectral_Index'] * u.dimensionless_unscaled\n errs['index'] = d['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw(**pars)\n elif spec_type == 'LogParabola':\n pars['alpha'] = d['Spectral_Index'] * u.dimensionless_unscaled\n pars['beta'] = d['beta'] * u.dimensionless_unscaled\n errs['alpha'] = d['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['beta'] = d['Unc_beta'] * u.dimensionless_unscaled\n model = LogParabola(**pars)\n else:\n raise ValueError('Spectral model {} not available'.format(spec_type))\n\n model.parameters.set_parameter_errors(errs)\n return model",
"def spectral_model(self):\n spec_type = self.data['SpectrumType'].strip()\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux_Density']\n errs['amplitude'] = self.data['Unc_Flux_Density']\n pars['reference'] = self.data['Pivot_Energy']\n\n if spec_type == 'PowerLaw':\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw(**pars)\n elif spec_type == 'PLExpCutoff':\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['ecut'] = self.data['Cutoff']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['ecut'] = self.data['Unc_Cutoff']\n model = ExponentialCutoffPowerLaw3FGL(**pars)\n elif spec_type == 'LogParabola':\n pars['alpha'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['beta'] = self.data['beta'] * u.dimensionless_unscaled\n errs['alpha'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['beta'] = self.data['Unc_beta'] * u.dimensionless_unscaled\n model = LogParabola(**pars)\n elif spec_type == \"PLSuperExpCutoff\":\n # TODO: why convert to GeV here? Remove?\n pars['reference'] = pars['reference'].to('GeV')\n pars['index_1'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['index_2'] = self.data['Exp_Index'] * u.dimensionless_unscaled\n pars['ecut'] = self.data['Cutoff'].to('GeV')\n errs['index_1'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['index_2'] = self.data['Unc_Exp_Index'] * u.dimensionless_unscaled\n errs['ecut'] = self.data['Unc_Cutoff'].to('GeV')\n model = PLSuperExpCutoff3FGL(**pars)\n else:\n raise ValueError('Spectral model {} not available'.format(spec_type))\n\n model.parameters.set_parameter_errors(errs)\n return model",
"def spectral_model(self):\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux']\n pars['emin'], pars['emax'] = self.energy_range\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n errs['amplitude'] = self.data['Unc_Flux']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw2(**pars)\n model.parameters.set_parameter_errors(errs)\n return model",
"def spectral_model(self):\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux50']\n pars['emin'], pars['emax'] = self.energy_range\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n\n errs['amplitude'] = self.data['Unc_Flux50']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n\n model = PowerLaw2(**pars)\n model.parameters.set_parameter_errors(errs)\n return model",
"def to_spectral(self, field_grid):\n return self._spharm.grdtospec(field_grid, self._ntrunc)",
"def slice_to_spectrum(self, *coords, **kwargs):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, 'Spectral axis needed to create a spectrum')\n axis = -1 if self.axes_wcs.wcs.ctype[0] == 'WAVE' else -2\n pixels = [cu.pixelize(coord, self.axes_wcs, axis) for coord in coords]\n item = range(len(pixels))\n if axis == -1:\n item[1:] = pixels\n item[0] = slice(None, None, None)\n item = [slice(None, None, None) if i is None else i for i in item]\n else:\n item[0] = pixels[0]\n item[1] = slice(None, None, None)\n item[2:] = pixels[1:]\n item = [slice(None, None, None) if i is None else i for i in item]\n\n data = self.data[item]\n errors = (None if self.uncertainty is None else self.uncertainty[item])\n mask = None if self.mask is None else self.mask[item]\n for i in range(len(pixels)):\n if pixels[i] is None:\n if i == 0:\n sumaxis = 1 if axis == -1 else 0\n else:\n sumaxis = 1 if i == 2 else i\n data = data.sum(axis=sumaxis)\n mask = mask.sum(axis=sumaxis)\n kwargs.update({'uncertainty': errors, 'mask': mask})\n wavelength_axis = self.wavelength_axis()\n freq_axis, cunit = wavelength_axis.value, wavelength_axis.unit\n err = self.uncertainty[item] if self.uncertainty is not None else None\n kwargs.update({'uncertainty': err})\n return Spectrum(np.array(data), np.array(freq_axis), cunit, **kwargs)",
"def collapse_to_spectrum(self, add_data=True, **kwargs):\n # get glue Data objects for the spectral cube and uncertainties\n flux_viewer = self._app.get_viewer(\n self._app._jdaviz_helper._default_flux_viewer_reference_name\n )\n uncert_viewer = self._app.get_viewer(\n self._app._jdaviz_helper._default_uncert_viewer_reference_name\n )\n [spectral_cube] = flux_viewer.data()\n [uncert_cube] = uncert_viewer.data()\n\n # This plugin collapses over the *spatial axes* (optionally over a spatial subset,\n # defaults to ``No Subset``). Since the Cubeviz parser puts the fluxes\n # and uncertainties in different glue Data objects, we translate the spectral\n # cube and its uncertainties into separate NDDataArrays, then combine them:\n if self.spatial_subset_selected != self.spatial_subset.default_text:\n nddata = spectral_cube.get_subset_object(\n subset_id=self.spatial_subset_selected, cls=NDDataArray\n )\n uncertainties = uncert_cube.get_subset_object(\n subset_id=self.spatial_subset_selected, cls=StdDevUncertainty\n )\n else:\n nddata = spectral_cube.get_object(cls=NDDataArray)\n uncertainties = uncert_cube.get_object(cls=StdDevUncertainty)\n\n # Use the spectral coordinate from the WCS:\n if '_orig_spec' in spectral_cube.meta:\n wcs = spectral_cube.meta['_orig_spec'].wcs.spectral\n else:\n wcs = spectral_cube.coords.spectral\n\n flux = nddata.data << nddata.unit\n mask = nddata.mask\n\n nddata_reshaped = NDDataArray(\n flux, mask=mask, uncertainty=uncertainties, wcs=wcs, meta=nddata.meta\n )\n\n # by default we want to use operation_ignores_mask=True in nddata:\n kwargs.setdefault(\"operation_ignores_mask\", True)\n # by default we want to propagate uncertainties:\n kwargs.setdefault(\"propagate_uncertainties\", True)\n\n # Collapse an e.g. 3D spectral cube to 1D spectrum, assuming that last axis\n # is always wavelength. This may need adjustment after the following\n # specutils PR is merged: https://github.com/astropy/specutils/pull/1033\n spatial_axes = (0, 1)\n\n collapsed_nddata = getattr(nddata_reshaped, self.function_selected.lower())(\n axis=spatial_axes, **kwargs\n ) # returns an NDDataArray\n\n # Convert to Spectrum1D, with the spectral axis in correct units:\n if hasattr(spectral_cube.coords, 'spectral_wcs'):\n target_wave_unit = spectral_cube.coords.spectral_wcs.world_axis_units[0]\n else:\n target_wave_unit = spectral_cube.coords.spectral.world_axis_units[0]\n\n flux = collapsed_nddata.data << collapsed_nddata.unit\n mask = collapsed_nddata.mask\n uncertainty = collapsed_nddata.uncertainty\n\n collapsed_spec = _return_spectrum_with_correct_units(\n flux, wcs, collapsed_nddata.meta, 'flux',\n target_wave_unit=target_wave_unit,\n uncertainty=uncertainty,\n mask=mask\n )\n\n if add_data:\n self.add_results.add_results_from_plugin(\n collapsed_spec, label=self.results_label, replace=False\n )\n\n snackbar_message = SnackbarMessage(\n \"Spectrum extracted successfully.\",\n color=\"success\",\n sender=self)\n self.hub.broadcast(snackbar_message)\n\n return collapsed_spec",
"def audio_to_spect(self, audio, pad_for_hop_length=True, pad_for_shape_preserving_inverse=False):\n if pad_for_shape_preserving_inverse:\n audio = self.pad_for_shape_preserving_inverse(audio)\n elif pad_for_hop_length:\n audio = self.pad_for_hop_length(audio)\n\n spect = np.abs(librosa.stft(audio, win_length=self.win_length, n_fft=self.win_length, hop_length=self.hop_length, center=False, window='hann'))\n\n return spect",
"def spectral_axis(self):\n\n if self._wcs is None:\n spec_axis = np.arange(self.size) * u.one\n else:\n spec_axis = self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] * \\\n u.Unit(self.wcs.wcs.cunit[0])\n if self._spectral_unit is not None:\n spec_axis = spec_axis.to(self._spectral_unit)\n\n return spec_axis",
"def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):\n self.params.update(**params)\n # Pass the model parameters through to the sps object\n ncomp = len(self.params['mass'])\n for ic in range(ncomp):\n s, p, x = self.one_sed(component_index=ic, filterlist=filters)\n try:\n spec += s\n maggies += p\n extra += [x]\n except(NameError):\n spec, maggies, extra = s, p, [x]\n # `spec` is now in Lsun/Hz, with the wavelength array being the\n # observed frame wavelengths. Flux array (and maggies) have not been\n # increased by (1+z) due to cosmological redshift\n\n if outwave is not None:\n w = self.csp.wavelengths\n spec = np.interp(outwave, w, spec)\n # Distance dimming and unit conversion\n if (self.params['zred'] == 0) or ('lumdist' in self.params):\n # Use 10pc for the luminosity distance (or a number provided in the\n # lumdist key in units of Mpc). Do not apply cosmological (1+z)\n # factor to the flux.\n dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2\n a = 1.0\n else:\n # Use the comsological luminosity distance implied by this\n # redshift. Incorporate cosmological (1+z) factor on the flux.\n lumdist = cosmo.luminosity_distance(self.params['zred']).value\n dfactor = (lumdist * 1e5)**2\n a = (1 + self.params['zred'])\n if peraa:\n # spectrum will be in erg/s/cm^2/AA\n spec *= to_cgs * a / dfactor * lightspeed / outwave**2\n else:\n # Spectrum will be in maggies\n spec *= to_cgs * a / dfactor / 1e3 / (3631*jansky_mks)\n\n # Convert from absolute maggies to apparent maggies\n maggies *= a / dfactor\n \n return spec, maggies, extra",
"def to_spectral_img(data):\n assert data.size(-1) == 2\n\n spectral_vol = torch.zeros([data.size(-2), data.size(-2), data.size(-2)])\n\n for i in range(data.size(-2)):\n kspc1 = torch.zeros(data.size())\n kspc1[:, i, :] = data[:, i, :]\n img1 = ifft2(kspc1)\n img1_abs = complex_abs(img1)\n\n spectral_vol[i, :, :] = img1_abs\n\n return spectral_vol",
"def b_transform_cube(b_obj):\n \n b_scale_object()\n b_scale_single_face(b_obj)",
"def split_cube(cube):\n # Split first axis\n ax0 = cube[0]\n ax0 -= 0.5\n ax0[ax0 < 0] = ax0[ax0 < 0] * 2 - 1\n ax0[ax0 > 0] = ax0[ax0 > 0] * 2\n if cube.shape[0] > 1:\n # Scale other axes to be in a useful range for floor divide\n cube[1:] = cube[1:] * 4\n # Define the shifts\n displace = cube[1:].floor() % 2\n shift = displace[0]\n # We need an algebra that satisies: 1 * 0 = 0, 1 * 1 = 1, 0 * 1 = 0, 0 * 0 = 1\n # This is achieved with * = (==)\n for ax in displace[1:]:\n shift = shift == ax\n ax0 += shift\n cube[1:] -= 2\n cube *= 2\n return cube.t()",
"def numpy_to_cube(np_array, similar_cube, dimensions):\n\n new_cube = iris.cube.Cube.copy(similar_cube) # copy similar cube\n\n # time, lat, lon\n if dimensions == 3:\n new_cube.data[:,:,:] = np.nan # convert new cube entries to nan\n new_cube.data[:,:,:] = np_array # fill with numpy array data\n\n # lat, lon\n elif dimensions == 2:\n new_cube.data[:,:] = np.nan # convert new cube entries to nan\n new_cube.data[:,:] = np_array # fill with numpy array data\n\n # either time, lat or lon only\n elif dimensions == 1:\n new_cube.data[:] = np.nan # convert new cube entries to nan\n new_cube.data[:] = np_array # fill with numpy array data\n\n # return the numpy array, failed to convert to a cube\n else:\n print('failed to convert')\n new_cube = np_array\n\n return new_cube",
"def to_pycuber(self) -> pycuber.Cube:\n self.soft_align_faces()\n qpos_copy = self.sim.data.qpos.copy()\n\n cubies = []\n\n for i in range(27):\n cubelet_meta = self.cubelet_meta_info[i]\n\n if cubelet_meta[\"type\"] == \"cubelet\":\n mtx = self._cubelet_rotation_matrix(cubelet_meta, qpos_copy)\n\n original_coords = cubelet_meta[\"coords\"]\n # current_coords = (mtx @ cubelet_meta['coords'].astype(float)).round().astype(int)\n\n cubie_desc = {}\n\n for prev_axis, sign in enumerate(original_coords):\n if sign != 0:\n vec = mtx[:, prev_axis] * sign\n new_axis = np.abs(vec).argmax()\n new_sign = vec[new_axis]\n\n color = PYCUBER_REVERSE_COLORS[prev_axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[new_axis, new_sign]\n\n cubie_desc[loc] = pycuber.Square(color)\n\n if len(cubie_desc) == 3:\n cubies.append(pycuber.Corner(**cubie_desc))\n elif len(cubie_desc) == 2:\n cubies.append(pycuber.Edge(**cubie_desc))\n if cubelet_meta[\"type\"] == \"driver\":\n original_coords = cubelet_meta[\"coords\"]\n axis = np.abs(original_coords).argmax()\n sign = original_coords[axis]\n\n color = PYCUBER_REVERSE_COLORS[axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[axis, sign]\n\n cubie_desc = {loc: pycuber.Square(color)}\n cubies.append(pycuber.Centre(**cubie_desc))\n\n return pycuber.Cube(cubies=cubies)",
"def add_cube(self, cube, name, size=None, lbda=None, add_white=False,\n unit_size=u.arcsec, unit_wave=u.angstrom):\n if size is None:\n size = self.default_size\n unit_size = u.arcsec\n\n subcub = cube.subcube(center=(self.dec, self.ra), size=size,\n unit_center=u.deg, unit_size=unit_size)\n\n if add_white:\n self.images['MUSE_WHITE'] = subcub.mean(axis=0)\n\n if lbda is not None:\n subcub = subcub.select_lambda(lbda[0], lbda_max=lbda[1],\n unit_wave=unit_wave)\n\n self.cubes[name] = subcub",
"def setspectral(self, *args, **kwargs):\n return _coordsys.coordsys_setspectral(self, *args, **kwargs)",
"def smooth_cube(self, width=3, **kwargs):\n # Validate input width\n num_dims = len(self.dimensions)\n wid_list = [1]*num_dims # NB: a width of 1 results in no smoothing\n if isinstance(width, (list, tuple)):\n # Note: we assume the last dim is always wavelength\n wid_list[0] = width[0]\n if num_dims > 2:\n wid_list[1] = width[1]\n print('Warning: smoothing over the x-axis can yield unexpected'\n +' results due to the time interval between observations.'\n +' Use with care.')\n\n if len(width) >= num_dims:\n print('Warning: smoothing over the wavelength axis is not'\n +' supported. Only widths for the Y & X axes will be used')\n elif isinstance(width, (int, float, u.Quantity)):\n wid_list[0] = width # Only smooth along y-axis\n else:\n print('Error: invalid width data type. Please input an int, float,'\n +' or astropy.units.Quantity instance', file=sys.stderr)\n return None\n\n coord_ax = ['y', 'x', 'w']\n for w in range(len(wid_list)-1):\n # Parse a astropy.units.Quantity and convert to units of pixels\n if isinstance(wid_list[w], u.Quantity):\n if wid_list[w].unit == u.pix:\n wid_list[w] = wid_list[w].value\n elif not wid_list[w].unit.physical_type == 'angle':\n print('Error: invalid width unit. Please input a pixel or'\n +' angular unit.', file=sys.stderr)\n return None\n else:\n try:\n # Note: y & x scales are in units of [arcsec]/[pixel]\n ax_scale = self.meta['pointing'][coord_ax[w]+'_scale']\n except KeyError:\n print('Error: missing '+coord_ax[w]+'-axis scale.')\n return None\n angular_wid_str = str(wid_list[w])\n wid_list[w] = wid_list[w].to('arcsec').value / ax_scale\n print('Note: on the '+coord_ax[w]+'-axis, '+angular_wid_str\n +' is equivalent to '+str(wid_list[w])+' pixels.')\n\n # Round to nearest pixel and add 1 to even values\n wid_list[w] = int(round(wid_list[w]))\n if wid_list[w] % 2 == 0:\n wid_list[w] = wid_list[w] + 1\n\n # Create smoothing kernel with normalized weights (i.e. sum to 1)\n # Note: Using a 2D or 3D kernel allows us to smooth everything at once\n sm_weights = np.ones(wid_list) / (wid_list[0]*wid_list[1])\n sm_kernel = CustomKernel(sm_weights)\n\n # Calculate smoothed data and uncertainty values\n sm_data = convolve(self.data, sm_kernel, **kwargs)\n if self.uncertainty is not None:\n sm_errs = np.sqrt(convolve(self.uncertainty.array**2,\n sm_kernel, **kwargs))\n else:\n sm_errs = none\n sm_data_mask = np.logical_or(np.isnan(sm_data), sm_data < 0)\n\n # Pack everything up in a new EISCube\n old_radcal = self.radcal\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Smoothed using pixel widths of '+str(wid_list))\n wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()\n\n output_cube = EISCube(sm_data, wcs=self.wcs, uncertainty=sm_errs,\n wavelength=self.wavelength, radcal=old_radcal,\n meta=new_meta, unit=self.unit,\n mask=sm_data_mask, missing_axes=wcs_mask)\n\n return output_cube",
"def normalise(self, spectrum):\n\n return spectrum",
"def normalise(self, spectrum):\n\n return spectrum",
"def index_as_cube(self):\n return _IndexAsCubeSlicer(self)",
"def to_spherical(self):\n return cartesian_to_spherical(self.x, self.y, self.z)",
"def spectral():\n c = _si.c.value\n h = _si.h.value\n hc = h * c\n two_pi = 2.0 * np.pi\n inv_m_spec = si.m**-1\n inv_m_ang = si.radian / si.m\n\n return Equivalency(\n [\n (si.m, si.Hz, lambda x: c / x),\n (si.m, si.J, lambda x: hc / x),\n (si.Hz, si.J, lambda x: h * x, lambda x: x / h),\n (si.m, inv_m_spec, lambda x: 1.0 / x),\n (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),\n (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),\n (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),\n (si.m, inv_m_ang, lambda x: two_pi / x),\n (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),\n (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),\n ],\n \"spectral\",\n )",
"def spectral_model(self, which=\"point\"):\n idx = self._get_idx(which)\n\n pars = {\n \"reference\": \"7 TeV\",\n \"amplitude\": self.data[f\"spec{idx}_dnde\"],\n \"index\": -self.data[f\"spec{idx}_index\"],\n }\n\n errs = {\n \"amplitude\": self.data[f\"spec{idx}_dnde_err\"],\n \"index\": self.data[f\"spec{idx}_index_err\"],\n }\n\n model = Model.create(\"PowerLawSpectralModel\", \"spectral\", **pars)\n\n for name, value in errs.items():\n model.parameters[name].error = value\n\n return model",
"def get_wave(self, _spectrogram, _is_full_spectrogram=False):\n # in half spectrogram, 1st row is not in mirror image\n # last line row is same, so, we flip 2nd to last but one rows\n\n fft_coeffs = self.F\n if not _is_full_spectrogram:\n # generating full spectrogram from the upper half if 'is_full_spectrogram' is False\n full_spectrogram = np.vstack((_spectrogram[0],\n _spectrogram[1:-1],\n _spectrogram[-1],\n np.flipud(_spectrogram[1:-1]).conjugate()))\n else:\n full_spectrogram = _spectrogram\n\n # Doing Fourier inverse to get segmented matrix\n signal_segment_matrix = np.dot(fft_coeffs.conjugate().T, full_spectrogram).real\n # Recovering timeseries signal\n signal = self.reconstruct_signal(signal_segment_matrix)\n # Rescaling the signal\n # signal = (signal - signal.min()) / (signal.max() - signal.min())\n # signal = signal / signal.var()\n\n # returning signal\n return signal",
"def convert_spectrogram_to_audio(self, spec: 'torch.tensor', **kwargs) -> 'torch.tensor':",
"def mel_spectrogram(self, y):\n # assert(torch.min(y.data) >= -1)\n # assert(torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n # magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return mel_output",
"def convert_linear_spectrogram_to_audio(self, spec: 'torch.tensor', **kwargs) -> 'torch.tensor':",
"def extract_spectra(self, cube, obj_mask='MASK_UNION', sky_mask='MASK_SKY',\n tags_to_try=('MUSE_WHITE', 'NB_LYALPHA',\n 'NB_HALPHA', 'NB_SUMOII3726'),\n skysub=True, psf=None, beta=None, lbda=None,\n apertures=None, unit_wave=u.angstrom):\n if obj_mask not in self.images:\n raise ValueError('key %s not present in the images dictionary'\n % obj_mask)\n\n if skysub and sky_mask not in self.images:\n raise ValueError('key %s not present in the images dictionary'\n % sky_mask)\n\n ima = self.images[obj_mask]\n\n if ima.wcs.sameStep(cube.wcs):\n size = ima.shape[0]\n unit_size = None\n else:\n size = ima.wcs.get_step(unit=u.arcsec)[0] * ima.shape[0]\n unit_size = u.arcsec\n\n center = (self.dec, self.ra)\n subcub = cube.subcube(center=center, size=size,\n unit_center=u.deg, unit_size=unit_size,\n lbda=lbda, unit_wave=unit_wave)\n wcsref = subcub.wcs\n\n if not ima.wcs.isEqual(wcsref):\n ima = ima.resample(\n newdim=subcub.shape[1:],\n newstart=wcsref.get_start(unit=u.deg),\n newstep=wcsref.get_step(unit=u.arcsec),\n order=0, unit_start=u.deg, unit_step=u.arcsec)\n\n object_mask = ima.data.data\n\n if skysub:\n skymask = self.images[sky_mask]\n if not skymask.wcs.isEqual(wcsref):\n skymask = skymask.resample(\n newdim=subcub.shape[1:],\n newstart=wcsref.get_start(unit=u.deg),\n newstep=wcsref.get_step(unit=u.arcsec),\n order=0, unit_start=u.deg, unit_step=u.arcsec)\n\n # Get the sky spectrum and subtract it\n self.spectra['MUSE_SKY'] = subcub.mean(weights=skymask.data.data,\n axis=(1, 2))\n subcub = subcub - self.spectra['MUSE_SKY']\n suffix = '_SKYSUB'\n else:\n suffix = ''\n\n # No weighting\n spec = (subcub * object_mask).sum(axis=(1, 2))\n self.spectra['MUSE_TOT' + suffix] = spec\n\n if apertures:\n tmpim = Image(data=np.zeros_like(object_mask, dtype=bool),\n copy=False, wcs=ima.wcs)\n for radius in apertures:\n tmpim.mask_ellipse(center, radius, 0)\n mask = object_mask.astype(bool) & tmpim.mask\n # spec = compute_spectrum(subcub, weights=mask)\n spec = (subcub * mask).sum(axis=(1, 2))\n self.spectra['MUSE_APER_%.1f%s' % (radius, suffix)] = spec\n tmpim.unmask()\n\n # Loop over the narrow-band images we want to use. Apply the object\n # mask and ensure that the weight map within the object mask is >=0.\n if tags_to_try is not None:\n nb_tags = list(set(tags_to_try) & set(self.images))\n ksel = (object_mask != 0)\n for tag in nb_tags:\n if self.images[tag].wcs.isEqual(wcsref):\n weight = self.images[tag].data.copy()\n weight[ksel] -= np.min(weight[ksel])\n weight = weight.filled(0)\n self.spectra[tag + suffix] = compute_optimal_spectrum(\n subcub, object_mask, weight)\n\n # PSF\n if psf is not None:\n if len(psf.shape) == 3:\n # PSF cube. The user is responsible for getting the\n # dimensions right\n if not np.array_equal(psf.shape, subcub.shape):\n raise ValueError('Incorrect dimensions for the PSF cube '\n '({}) (it must be ({})) '\n .format(psf.shape, subcub.shape))\n elif len(psf.shape) == 1:\n psf = create_psf_cube(subcub.shape, psf, beta=beta, wcs=wcsref)\n\n spec = compute_optimal_spectrum(subcub, object_mask, psf)\n self.spectra['MUSE_PSF' + suffix] = spec\n # Insert the PSF weighted flux - here re-normalised?",
"def transform_cube(self,\n cube: xr.Dataset,\n gm: GridMapping,\n cube_config: CubeConfig) -> TransformedCube:\n return cube, gm, cube_config"
]
| [
"0.6274869",
"0.6003347",
"0.5845092",
"0.5763128",
"0.56499904",
"0.55344087",
"0.55215806",
"0.5233117",
"0.5228521",
"0.5198386",
"0.5195983",
"0.51448613",
"0.51295257",
"0.5123832",
"0.5117463",
"0.51086575",
"0.50924385",
"0.50752395",
"0.50532764",
"0.50532764",
"0.5052271",
"0.50365114",
"0.5025228",
"0.50172853",
"0.5015062",
"0.5012336",
"0.4975221",
"0.49724463",
"0.49710262",
"0.49196136"
]
| 0.8686489 | 0 |
Returns a numpy array containing the time values for the cube's time dimension, as well as the unit used. | def time_axis(self):
if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:
raise cu.CubeError(1, 'No time axis present')
delta = self.axes_wcs.wcs.cdelt[0]
crpix = self.axes_wcs.wcs.crpix[0]
crval = self.axes_wcs.wcs.crval[0]
start = crval - crpix * delta
stop = start + len(self.data) * delta
cunit = u.Unit(self.axes_wcs.wcs.cunit[0])
return np.linspace(start, stop, num=self.data.shape[-1]) * cunit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time(self):\n return self.time_array",
"def time_axis_array(self):\n return self._time_axis.get_array()",
"def arr_time(self):\n return self._arr_time",
"def times(self):\n if self._times is None:\n self._times = np.zeros((self.ntimestep), dtype=np.float64)\n for record in range(self.ntimestep):\n self._times[record] = self.get_data_time(record)\n\n return self._times",
"def _write_time_cube(self, cube, key_list):\n data = cube.data[:]\n coords = cube.coord('time')[:]\n for t in range(0, data.shape[0]):\n value = round_variable(self.input_data.get_value(\n InputType.VARIABLE)[0], data[t])\n with iris.FUTURE.context(cell_datetime_objects=True):\n time_str = coords[t].cell(\n 0).point.strftime('%Y-%m-%d')\n try:\n self.data_dict[time_str].append(value)\n except KeyError:\n key_list.append(time_str)\n self.data_dict[time_str] = [value]",
"def get_time(start, stop, step = 1):\n # reshape array into columns from row\n return numpy.reshape(numpy.array(range(start, stop, step)), (-1,1))",
"def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)",
"def rt_arr_time(self):\n return self._rt_arr_time",
"def get_time(self):\n return numpy.linspace(self.header.time_gate_start, \\\n self.header.time_gate_stop, self.num_time_bins())",
"def time(self):\n return self[self.time_columns]",
"def time(self):\n return self[self.time_columns]",
"def unit_array(self):\n return self._data_array.values * units(self._units)",
"def get_tim(self):\n return self.sum(axis=0)",
"def conv_time_units(cube1,cube2):\n time_coord1=cube1.coord('time')\n time_units1=time_coord1.units\n #\n time_coord2=cube2.coord('time')\n time_units2=time_coord2.units\n #\n new_time_vals=[time_units2.date2num(time_units1.num2date(xx)) for xx in time_coord1.points]\n new_time_coord=iris.coords.DimCoord(new_time_vals,standard_name='time',units=time_units2)\n #\n coord_names=[dimc.standard_name for dimc in cube1.dim_coords]\n time_index=coord_names.index('time')\n cube1.remove_coord('time')\n cube1.add_dim_coord(new_time_coord,time_index)",
"def get_time_unit(self, variables):\n if len(self.TIME_VARIABLE):\n # times = self._get_variable(variables, self.TIME_VARIABLE)[:]\n units = variables['time'].units\n return units\n else:\n return \"\"",
"def _generate_time_values(self):\r\n # Populate time values\r\n log('writing times', 'INFO')\r\n d1970 = datetime(1970, 1, 1, tzinfo=utc)\r\n time_array = [[int((self.start_datetime - d1970).total_seconds())]]\r\n \r\n datetime_nc_start_simulation = self.start_datetime\r\n for raw_nc_index, raw_nc in enumerate(self.raw_nc_list):\r\n \r\n raw_nc_time = raw_nc.get_time_array(datetime_simulation_start=datetime_nc_start_simulation,\r\n simulation_time_step_seconds=self.time_step_array[raw_nc_index])\r\n \r\n time_array.append(raw_nc_time)\r\n datetime_nc_start_simulation = datetime.utcfromtimestamp(raw_nc_time[-1])\r\n \r\n self.cf_nc.variables['time'][:] = np.concatenate(time_array)\r\n end_date = datetime.utcfromtimestamp(self.cf_nc.variables['time'][-1])\r\n self.cf_nc.time_coverage_start = self.start_datetime.isoformat() + 'Z'\r\n self.cf_nc.time_coverage_end = end_date.isoformat() + 'Z'",
"def result_array(self) -> np.ndarray:\n return np.array([r[\"time\"] for r in self.profile_result])",
"def get_time_slices(self):\n tot = []\n for clu in self._clusters:\n tot.extend(self._clusters[clu].to_dict()[:])\n #tot.sort()\n return tot",
"def _calc_time(time_lines: list) -> np.ndarray:\n time = [time_to_fraction_hour(line.split()[1]) for line in time_lines]\n return np.array(time)",
"def zeros(self):\n super(TimeCube, self).zeros()\n self.data = np.zeros([self.time_range[1]-self.time_range[0]]+self.cubesize, np.uint8)",
"def time(self):\n self.convert_window(\"Time\", \"seconds\", [\"centuries\", \"days\", \"decades\", \"femtoseconds\", \"fortnights\", \"hours\", \"microseconds\", \"millenia\", \"milliseconds\", \"minutes\", \"months(Common)\", \"months(Synodic)\", \"nanoseconds\", \"picoseconds\", \"quarters(Common)\", \"seconds\", \"shakes\", \"weeks\", \"years(Average Gregorian)\", \"years(Common)\", \"years(Julian)\", \"years(Leap)\", \"years(Tropical)\"])",
"def getTimeUnits(self):\n return _libsbml.Event_getTimeUnits(self)",
"def time(self):\r\n return UniformTime(length=self.__len__(), t0=self.t0,\r\n sampling_interval=self.sampling_interval,\r\n time_unit=self.time_unit)",
"def get_itime_section_data(date, time):\n return np.array([time, date.day, date.month, date.year, -2345, 1, 0, -2345, -2345, -2345, 0, 0, 0])",
"def time_units(self) -> str:\n return self._ll_tree_sequence.get_time_units()",
"def test_TimeArray_convert_unit():",
"def time_average(new_cube):\n\n time_average_cube = new_cube.collapsed('time', iris.analysis.MEAN)\n\n return time_average_cube",
"def getTimeUnits(self):\n return _libsbml.Model_getTimeUnits(self)",
"def get_time_array(array, axis_of_time_steps=2, start=0., end=1.):\r\n number_of_time_steps = np.shape(array)[axis_of_time_steps]\r\n return np.linspace(start, end, number_of_time_steps)",
"def get_time(self):\n return self._ticks"
]
| [
"0.7211374",
"0.6856966",
"0.68256146",
"0.67285633",
"0.65972865",
"0.6441097",
"0.63960433",
"0.6338288",
"0.6300424",
"0.6298656",
"0.6298656",
"0.62866175",
"0.62780577",
"0.62295645",
"0.6061472",
"0.6057796",
"0.6039089",
"0.60030895",
"0.5996386",
"0.59960365",
"0.59915805",
"0.59800357",
"0.597387",
"0.59660053",
"0.5950912",
"0.5950013",
"0.59471023",
"0.5930928",
"0.58964217",
"0.5891868"
]
| 0.6944582 | 1 |
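Editorial note: the `time_axis` record above reconstructs a linear axis from the WCS keywords (CRVAL, CRPIX, CDELT, CUNIT). Below is a minimal, self-contained sketch of that arithmetic, assuming a purely linear axis; the function name and sample values are illustrative and not part of the dataset.

```python
# Sketch of a linear WCS axis, mirroring the arithmetic in the record above.
# All names and sample values here are made up for illustration.
import numpy as np
import astropy.units as u

def linear_axis(crval, crpix, cdelt, cunit, n_points):
    start = crval - crpix * cdelt            # world value at pixel index 0
    stop = start + n_points * cdelt          # value one step past the last pixel
    return np.linspace(start, stop, num=n_points) * u.Unit(cunit)

# e.g. 10 samples at 0.5 s cadence starting at t = 0 s
print(linear_axis(crval=0.0, crpix=0.0, cdelt=0.5, cunit="s", n_points=10))
```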
Returns whether the WCS system and the array are well-aligned. | def _array_is_aligned(self):
rot_matrix = self.axes_wcs.wcs.pc
return np.allclose(rot_matrix, np.eye(self.axes_wcs.wcs.naxis)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isaligned(a: np.ndarray, alignment: int) -> bool:\n return (a.ctypes.data % alignment) == 0",
"def is_aligned(self):\n\n return self._bits == 0",
"def has_wcs(self):\n if self.header is None:\n return False\n\n required = 'CRPIX,CRVAL,CTYPE'.split(',')\n keywords = np.concatenate(\n [(lambda i: [r+str(i+1) for r in required])(i) \n for i in range(self.header['NAXIS'])])\n\n return all([k in self.header for k in keywords])",
"def _consistentWithWA_(self, span, lan):\n\t\tif lan == 'src':\n\t\t\twordAlign = self.waMatrix\n\t\telse:\n\t\t\twordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))] \n\n\t\tpos1 = [j for i in xrange(span[0], span[1]) for j in xrange(len(wordAlign[i])) if wordAlign[i][j] == 1]\n\t\tif pos1 == []: return True\n\n\t\tfor i in xrange(span[0], span[1]):\n\t\t\tfor j in xrange(min(pos1), max(pos1) + 1):\n\t\t\t\tif sum([wordAlign[row][j] for row in xrange(len(wordAlign[:span[0]]))]) == 0 and \\\n\t\t\t\t\t\tsum([wordAlign[row][j] for row in xrange(span[1], len(wordAlign))]) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\t#print >> debug_log, 'consistent:', span\n\t\treturn True",
"def isAligned(self):\n return (\n abs(self.desired_distance - self.vision.getDistance())\n <= self.DISTANCE_TOLERANCE\n ) and (abs(self.vision.getHeading()) <= self.HEADING_TOLERANCE)",
"def has_wcs(self):\n return self.wcs is not None",
"def _check_wcs_structure(self, wcs):\n if wcs is None:\n return False, \"WCS cannot be None.\"\n\n if not wcs.is_celestial:\n return False, \"WCS must be exclusively a celestial WCS.\"\n\n wcs = wcs.deepcopy()\n naxis1, naxis2 = wcs.pixel_shape\n\n # check mapping of corners and CRPIX:\n pts = np.array([[1.0, 1.0], [1.0, naxis2], [naxis1, 1.0],\n [naxis1, naxis2], wcs.wcs.crpix])\n\n sky_all = wcs.all_pix2world(pts, 1)\n foc_all = wcs.pix2foc(pts, 1)\n\n # strip all *known* distortions:\n wcs.cpdis1 = None\n wcs.cpdis2 = None\n wcs.det2im1 = None\n wcs.det2im2 = None\n wcs.sip = None\n\n # check that pix2foc includes no other distortions besides the ones\n # that we have turned off above:\n if not np.allclose(pts, wcs.pix2foc(pts, 1)):\n False, \"'pix2foc' contains unknown distortions\"\n\n wcs.wcs.set()\n\n # check that pix2foc contains all known distortions:\n if not np.allclose(wcs.all_world2pix(sky_all, 1), foc_all, atol=1e-3,\n rtol=0):\n return False, \"'WCS.pix2foc()' does not include all distortions.\"\n\n return True, ''",
"def is_aligned(self, A):\n return unique_element(imap(lambda v: v[1], A)) is not None",
"def regular(self):\n return all(numpy.allclose(w, w[0]) for w in self.binwidths)",
"def is_wcsaxes(axes):\n return isinstance(axes, wcsaxes.WCSAxes)",
"def is_wide(self) -> bool:\n return self.layout == \"planar\"",
"def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)",
"def is_worthless(self):\n self.normalize()\n return self.all_details['normalized'] in WORTHLESS_UA_TYPES",
"def healthy_test(obj: np.ndarray) -> bool:\n nb_rows, nb_cols = obj.shape\n return nb_rows == nb_cols > 1 and np.array_equal(obj, colony(nb_rows))",
"def square(self):\n return self.regular and numpy.allclose(*(w[0] for w in self.binwidths))",
"def is_skew_symmetric(self):\n return self.all_equal(-self.transpose())",
"def maybe_rectal(self):\n return bool(set(self.locations) & set(StandardTerminology.RECTAL_LOCATIONS))",
"def _validate_axes(self):\n if not (\n np.abs(self.axis_u.dot(self.axis_v) < 1e-6)\n and np.abs(self.axis_v.dot(self.axis_w) < 1e-6)\n and np.abs(self.axis_w.dot(self.axis_u) < 1e-6)\n ):\n raise ValueError(\"axis_u, axis_v, and axis_w must be orthogonal\")\n return True",
"def maybe_distal(self):\n return bool(set(self.locations) & set(StandardTerminology.DISTAL_LOCATIONS))",
"def check_wf_2d(ψ: ndarray):\n pass",
"def is_pure(self) -> np.ndarray:\n return np.isclose(self.w, np.zeros_like(self.w.shape[0]))",
"def is_affine_st(A, tol=1e-10):\n (_, wx, _,\n wy, _, _,\n *_) = A\n\n return abs(wx) < tol and abs(wy) < tol",
"def isscalar(self):\n return not bool(self.shape)",
"def IsWholeWorld(self, resolution=None):\n if resolution is None:\n resolution = self.GetNativeResolution()\n\n spatial_ref = self.GetSpatialReference()\n world_extents = spatial_ref.GetWorldExtents()\n extents = self.GetExtents()\n ll_offset = world_extents.lower_left - extents.lower_left\n ur_offset = world_extents.upper_right - extents.upper_right\n\n pixel_sizes = spatial_ref.GetPixelDimensions(resolution=resolution)\n return (abs(ll_offset.x) <= pixel_sizes.x and\n abs(ll_offset.y) <= pixel_sizes.y and\n abs(ur_offset.x) <= pixel_sizes.x and\n abs(ur_offset.y) <= pixel_sizes.y)",
"def is_unitary(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n round_precision = int(conf.get(\n self._sc, 'sparkquantum.math.roundPrecision'))\n\n return round(self.norm(), round_precision) == 1.0",
"def is_skew_component_fusion(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n skew_ob = GriddedPerm((0, 1), (fcell, scell))\n else:\n skew_ob = GriddedPerm((0, 1), (fcell, scell))\n return skew_ob in self._tiling.obstructions",
"def is_symetric(self):\n for x, y in _triangle_range(self.size):\n\n if self.read(x, y) != self.read(y, x):\n return False\n\n return True",
"def isscalar(self):\n return not self.axes",
"def check(self):\n\n ok = True\n\n # check that each pair is ok on its own\n for winp in self.winps:\n if not winp.check():\n ok = False\n\n # check that no pairs overlap in the Y direction\n ystart0,xleft0,xright0,nx0,ny0 = self.winps[0].get()\n for winp in self.winps[1:]:\n ystart1,xleft1,xright1,nx1,ny1 = winp.get()\n if ystart0 is not None and ystart1 is not None and ny0 is not None and \\\n ystart1 < ystart0 + ny0:\n winp.ystart.config(bg=COL_WARN)\n ok = False\n ystart0,xleft0,xright0,nx0,ny0 = ystart1,xleft1,xright1,nx1,ny1\n\n return ok",
"def is_diagonal(self):\n return self.is_upper() and self.is_lower()"
]
| [
"0.6622194",
"0.66116005",
"0.64189285",
"0.629337",
"0.6231566",
"0.62093145",
"0.61557496",
"0.61207116",
"0.5808725",
"0.57705253",
"0.5653776",
"0.5611747",
"0.5607932",
"0.5607731",
"0.55909234",
"0.5550369",
"0.5533875",
"0.55202824",
"0.54235834",
"0.5402072",
"0.5395196",
"0.53438574",
"0.53215444",
"0.52825546",
"0.5260663",
"0.5254046",
"0.5246952",
"0.5214177",
"0.52027345",
"0.51926696"
]
| 0.74039537 | 0 |
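Editorial note: the `_array_is_aligned` record above tests alignment by comparing the WCS PC (rotation) matrix with the identity. A small stand-alone sketch of that check using astropy's `WCS` directly; the helper name is an assumption for the example.

```python
# Alignment check: the WCS PC matrix should be (numerically) the identity matrix.
import numpy as np
from astropy.wcs import WCS

def is_aligned(wcs: WCS) -> bool:
    return np.allclose(wcs.wcs.pc, np.eye(wcs.wcs.naxis))

w = WCS(naxis=2)       # a freshly created WCS carries an identity PC matrix
print(is_aligned(w))   # True
```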
Parse the case Excel file. | def manual_case_parser(self):
data = self.excel_parser()
result = self.process_module_field(data)
self.add_class_name_field(result)
self.process_multi_step_field(result)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_sample_sheet(self):\n pass",
"def parse_ra():\r\n fname = 'parse/File1.xlsx'\r\n sheet_name = 'Sheet1'\r\n\r\n ra_sheet = get_sheet(fname, sheet_name)\r\n return parse_sheet(ra_sheet)",
"def parse_xlsx(filename):\n from openpyxl import load_workbook\n\n workbook = load_workbook(filename=filename)\n worksheet = workbook.get_sheet_by_name(workbook.get_sheet_names()[0])\n row_it = worksheet.iter_rows()\n split_row_list = ([cell.value if cell.value is not None else \"\"\n for cell in row] for row in row_it)\n\n return parse_generic(split_row_list)",
"def read_xlsx(self, filename):\n xlsx = pd.ExcelFile(filename)\n for sheet in xlsx.sheet_names:\n table_index_header = cfg.get_list(\"table_index_header\", sheet)\n self.input_data[sheet] = xlsx.parse(\n sheet,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in sheet),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self",
"def parse(csv_file, xlsx_file):\n\n #Summary info\n no_dsa_pattern = r'\\((\\d+)/(\\d+)\\).*don\\'t have Deep Security installed'\n disable_ips_pattern = r'\\((\\d+)/(\\d+)\\).*don\\'t have Intrusion Prevention enabled'\n no_rule_pattern = r'\\((\\d+)/(\\d+)\\).*don\\'t have rules assigned'\n summary_patterns = re.compile('{}|{}|{}'.format(no_dsa_pattern, disable_ips_pattern, no_rule_pattern))\n no_dsa_count = None\n no_ips_count = None\n no_rule_count = None\n description = []\n summary = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n #Detail info\n no_dsa_list_title = r\"Hosts that don't have Deep Security installed\"\n disable_ips_list_title = r\"Hosts that have Deep Security installed but don't have Intrusion Prevention enabled\"\n no_rule_list_title = r\"Hosts that have Intrusion Prevention enabled but no rules assigned\"\n title_patterns = re.compile('({})|({})|({})'.format(no_dsa_list_title, disable_ips_list_title, no_rule_list_title))\n no_dsa_list = []\n no_ips_list = []\n no_rule_list = []\n host_list = [\n no_dsa_list,\n no_ips_list,\n no_rule_list\n ]\n\n #Help url\n action_pattern = re.compile('\\[(.+)\\: (.+)\\]')\n action_list = []\n\n\n #Parse csv file\n with open(csv_file, 'r', newline='') as csv_file:\n rows = csv.reader(csv_file, quotechar='\"', delimiter=',', skipinitialspace=True)\n for index, row in enumerate(rows):\n if not row:\n continue\n\n if row[0].startswith('#'):\n description.append(row[0].replace('# ', ''))\n continue\n\n search = summary_patterns.search(row[0])\n if search:\n current_pattern = next((i for i, x in enumerate(search.groups()) if x), None)\n summary[int(current_pattern/2)] = ( int(search.group(current_pattern+1)), int(search.group(current_pattern+2)) - int(search.group(current_pattern+1)) )\n continue\n\n search = action_pattern.search(row[0])\n if search:\n action_list.append((search.group(1), search.group(2)))\n if len(action_list) == 4:\n break\n\n current_pattern = -1\n for index, row in enumerate(rows):\n if not row or re.match('None', row[0]):\n continue\n\n search = title_patterns.search(row[0])\n if search:\n current_pattern = next((i for i, x in enumerate(search.groups()) if x), None)\n continue\n\n if current_pattern == -1:\n continue\n host_list[current_pattern].append(row[0])\n\n no_dsa_count, no_ips_count, no_rule_count = summary\n\n #Write xlsx file\n workbook = xlsxwriter.Workbook(xlsx_file)\n summary_worksheet = workbook.add_worksheet('Summary')\n rawdata_worksheet = workbook.add_worksheet('Data')\n no_dsa_worksheet = workbook.add_worksheet(\"Hosts that don't have DS\")\n no_ips_worksheet = workbook.add_worksheet(\"Hosts that don't have IPS\")\n no_rule_worksheet = workbook.add_worksheet(\"Hosts that don't have rules\")\n\n #Rawdata worksheet\n data = [\n ['', 'Deep Security installed', 'Intrusion Prevention enabled', 'rules assigned'],\n ['Yes', no_dsa_count[1], no_ips_count[1], no_rule_count[1]],\n ['No', no_dsa_count[0], no_ips_count[0], no_rule_count[0]]\n ]\n rawdata_worksheet.write_column('A1', data[0], workbook.add_format({'bold': True}))\n rawdata_worksheet.write_column('B1', data[1])\n rawdata_worksheet.write_column('C1', data[2])\n\n #No DSA install worksheet\n no_dsa_worksheet.write_row(0, 0, [no_dsa_list_title])\n no_dsa_worksheet.write_column(1, 0, no_dsa_list)\n\n #No IPS enable worksheet\n no_ips_worksheet.write_row(0, 0, [disable_ips_list_title])\n no_ips_worksheet.write_column(1, 0, no_ips_list)\n\n #No Rule worksheet\n no_rule_worksheet.write_row(0, 0, [no_rule_list_title])\n no_rule_worksheet.write_column(1, 0, 
no_rule_list)\n\n #Summary worksheet\n chart = workbook.add_chart({'type': 'column', 'subtype': 'percent_stacked'})\n chart.add_series({\n 'name': '=Data!$B$1',\n 'categories': '=Data!$A$2:$A$4',\n 'values': '=Data!$B$2:$B$4',\n 'fill': {'color': '#21BC3B'}\n })\n chart.add_series({\n 'name': '=Data!$C$1',\n 'categories': '=Data!$A$2:$A$4',\n 'values': '=Data!$C$2:$C$4',\n 'fill': {'color': '#C22828'}\n })\n chart.set_title ({'name': 'Protected hosts'})\n chart.set_x_axis({\n 'name': 'Status',\n })\n\n chart.set_y_axis({\n 'major_unit' : 0.2,\n 'min' : 0,\n 'max' : 1\n })\n\n summary_worksheet.insert_chart('A1', chart)\n summary_worksheet.write_column('A16', description)\n for index, (text, url) in enumerate(action_list):\n summary_worksheet.write_url('A'+str(index+20), url, string = text)\n summary_worksheet.activate()\n\n workbook.close()",
"def Excel_Load_Data( self, ExcelFilename ):\n pass",
"def parse_tables_xlsx(inp):\n # --------------------------------------------------------------------------\n # Start\n # --------------------------------------------------------------------------\n raw_read = pd.read_excel(inp,sheet_name = None)\n indx = get_tab_index(raw_read)\n # --------------------------------------------------------------------------\n # Get the individual tables from the file\n # --------------------------------------------------------------------------\n tabdict = {}\n for i in indx['tab'].to_list():\n tabdict[i] = get_table_df(raw_read[i])\n # --------------------------------------------------------------------------\n # Finish\n # --------------------------------------------------------------------------\n out = {}\n out['indx'] = indx\n out['tabs'] = tabdict\n return out",
"def open_file(path):\n book = xlrd.open_workbook(path)\n # print number of sheets\n #print book.nsheets\n # print sheet names\n #print book.sheet_names()\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n # read a row\n #print first_sheet.row_values(0)\n # read a cell\n cell = first_sheet.cell(1,0)\n #print cell\n #print cell.value\n # read a row slice\n #print first_sheet.row_slice(rowx=0,start_colx=0,end_colx=2)\n\n \"\"\"\n if Junipter.search_junipter_rule(first_sheet,1) == 0:\n print \"Juniper rule doesn't match\"\n else:\n print \"Juniper rule match\"\n \"\"\"\n\n \"\"\"\n if Mitac.search_mitac_rule(first_sheet,1) == 0:\n print \"Mitac rule doesn't match\"\n else:\n print \"Mitac rule match\"\n \"\"\"\n\n if Fabrinet.search_fabrinet_rule(first_sheet,3) == 0:\n print \"fabrinet rule doesn't match\"\n else:\n print \"fabrinet rule match\"",
"def parseSheet(self):\n self.log.info(\"Parsing {0} rows and {1} columns.\".format(self.rowns,self.colns))\n \n self.column_dimensions = {}\n self.property_dimensions = {}\n self.row_dimensions = {}\n self.rowhierarchy = {}\n\n # Get dictionary of annotations\n self.annotations = self.r_sheet.cell_note_map\n \n for i in range(0,self.rowns):\n self.rowhierarchy[i] = {}\n \n for j in range(0, self.colns):\n # Parse cell data\n self.source_cell = self.r_sheet.cell(i,j)\n self.source_cell_name = cellname(i,j)\n self.style = self.styles[self.source_cell].name\n self.cellType = self.getType(self.style)\n self.source_cell_qname = self.getQName(self.source_cell_name)\n \n self.log.debug(\"({},{}) {}/{}: \\\"{}\\\"\". format(i,j,self.cellType, self.source_cell_name, self.source_cell.value))\n\n # Try to parse ints to avoid ugly _0 URIs\n try:\n if int(self.source_cell.value) == self.source_cell.value:\n self.source_cell.value = int(self.source_cell.value)\n except ValueError:\n self.log.debug(\"(%s.%s) No parseable int\" % (i,j))\n\n \n # Parse annotation (if any)\n if self.config.get('annotations', 'enabled') == \"1\":\n if (i,j) in self.annotations:\n self.parseAnnotation(i, j)\n\n # Parse cell even if empty\n if self.cellType == 'Data':\n self.parseData(i, j)\n elif (self.cellType == 'HRowHeader') :\n self.updateRowHierarchy(i, j)\n elif self.cellType == 'ColHeader' :\n self.parseColHeader(i, j)\n elif self.cellType == 'RowProperty' :\n self.parseRowProperty(i, j)\n \n # If cell not empty, check for more types\n if not self.isEmpty(i,j) :\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],RDF.type,self.namespaces['tablink'][self.cellType]))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['cell'],Literal(self.source_cell_name)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['col'],Literal(colname(j))))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['row'],Literal(i+1)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname] isrow row\n if self.cellType == 'Title' :\n self.parseTitle(i, j)\n \n elif self.cellType == 'RowHeader' :\n self.parseRowHeader(i, j)\n \n elif self.cellType == 'HRowHeader' :\n self.parseHierarchicalRowHeader(i, j)\n \n elif self.cellType == 'RowLabel' :\n self.parseRowLabel(i, j)\n \n # Add additional information about the hierarchy of column headers\n for value in self.column_dimensions.values():\n for index in range(1, len(value)):\n uri_sub = self.getColHeaderValueURI(value[:index+1])\n uri_top = self.getColHeaderValueURI(value[:index])\n self.graph.add((uri_sub, self.namespaces['tablink']['subColHeaderOf'], uri_top))\n self.graph.add((uri_sub, self.namespaces['tablink']['depth'], Literal(index)))\n self.graph.add((uri_top, self.namespaces['tablink']['depth'], Literal(index-1)))\n \n self.log.info(\"Done parsing...\")",
"def read_excel_file(self):\n self.df = pd.read_excel(str(self.file_path))\n self.data_mat=np.array(self.df).astype(float).transpose()",
"def parse_csv(filename):\n cases = []\n\n with open(filename) as file:\n # Read the rest of the lines\n for line in file:\n cases.append(Case(line))\n\n return cases",
"def getCaseData(filename):\n filename = filename\n casedata = pd.read_table(filename, sep=',', skiprows=3, parse_dates=True, \\\n infer_datetime_format=True, index_col=0, na_filter=False, \\\n names=[\"time\", \"value\", \"flag\"], true_values=['M'], false_values=[''])\n return casedata",
"def parse_xlsx_sheet(f, n=0):\n xl_file = pd.ExcelFile(f)\n dfs = xl_file.parse(xl_file.sheet_names[n], na_values=['n.a.', 'n.d.'])\n return dfs",
"def read_excel_file(self):\n self.df = pd.read_excel(str(self.file_path))\n self.data_mat=np.array(self.df).astype(float)",
"def parse_files(self):\n \"\"\" @param name: name of the file \"\"\"\n \"\"\" @type name: string \"\"\"\n df = pd.DataFrame()\n if not self.xlfnames:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source not found or cannot be open')\n logging.error('error happened: no excel files found')\n return False\n for fileName in self.xlfnames:\n try:\n xlfname = self.folder + '/' + fileName #\n xl = pd.ExcelFile(xlfname)\n except Exception as e:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\",\n 'data source not found or cannot be open', e)\n return False\n try:\n # code for one file per sheet\n # for sheet in xl.sheet_names:\n # self.parse_sheet(xl,sheet)\n\n # code for one file for all\n for sheet in xl.sheet_names:\n df_tmp = self.parse_sheet_to_df(xl, sheet, df)\n df = df.append(df_tmp, ignore_index=True)\n except Exception as e:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected',\n e)\n return False\n return self.write_to_file(df)",
"def load_from_excel(self, excel_fp: str):\n # TODO:\n pass",
"def read_data(filename):\n \n # Iterate over all X-values. Y-values are stored in colummns of particular worksheet\n for x in range(0,13):\n\n wb = xlrd.open_workbook(filename)\n ws = wb.sheet_by_index(0)\n\n # This position of metadata doesn't change its relative position from sheet-to-sheet\n n_energy = int(ws.cell_value(1,3))\n n_iter = int(ws.cell_value(4,3))\n Rows_to_Skip = 15\n\n # Rename columns\n column_names = [str(x) for x in range(0,n_iter)]\n column_names.insert(0,'nan')\n column_names.insert(0,'KE')\n\n # Read data using pandas\n df_data = pd.read_excel(io = filename,\n sheet_name=x,\n skiprows = Rows_to_Skip,\n names = column_names,\n index_col='KE'\n )\n # Drop the second column as it is always supposed to be false\n df_data.drop(columns=df_data.columns[0],inplace=True)\n \n # Get x_data as the index \n x_array = np.array(df_data.index).reshape(len(df_data.index),1)\n \n # If we encounter first sheet\n if x==0:\n y = df_data.to_numpy()\n \n # Stack with the cummulative y built till now\n else:\n y = np.hstack((y, df_data.to_numpy()))\n \n # Ideally x_array should be (481, 1), and y should be (481, 169)\n return x_array, y",
"def start_parse(self, file_path, data=None):\n with open_xlsb(file_path) as wb:\n\n cols = []\n data_frame = []\n with wb.get_sheet(data) as sheet:\n for row in sheet.rows():\n l = [item.v for item in row]\n if not cols:\n cols = l\n data_frame.append(l)\n\n data_frame = pd.DataFrame(data_frame[1:], columns=data_frame[0])\n data = data_frame.to_dict(\"split\")[\"data\"]\n\n parsable = []\n for d in data:\n for i in d:\n if str(i) == \"nan\" and math.isnan(i):\n return None\n parsable.append(dict(zip(cols, d)))\n\n return json.dumps(parsable)",
"def import_data(self):\n\n self.worksheet = (\n xlrd.open_workbook(filename=self.source).sheet_by_index(0)\n )\n # Import conversion data from worksheet and store as scipy arrays\n self.T_exp = np.array(\n self.worksheet.col_values(0, start_rowx=4, end_rowx=None)\n ) + 273.15\n self.HCout_raw = np.array(\n self.worksheet.col_values(4, start_rowx=4, end_rowx=None)\n )\n self.HCin_raw = np.array(\n self.worksheet.col_values(8, start_rowx=4, end_rowx=None)\n )\n self.eta_exp = (\n (self.HCin_raw - self.HCout_raw) / self.HCin_raw\n )\n self.T_model = np.linspace(\n self.T_exp[0] - 50, self.T_exp[-1] + 50, 25\n )\n self.T_array = self.T_model",
"def readxl(df, file_name):\r\n print( 'reading the excel file') \r\n df = pd.read_excel(file_name, sheet_name = 'Sheet1')\r\n df.columns = [x.strip('/s+').lower().replace(' ','_').replace('.','_') for x in df.columns]\r\n print('list of columns of data frame ')\r\n print(df.columns)\r\n print('shape of data frame')\r\n print(df.shape)\r\n print('reading excel file completed')\r\n return(df)",
"def extract(self):\n \n print('Extracting Metrics data... ',end=''),\n self.df = pd.read_excel(self.file_path, index_col=0)\n print('Done')",
"def parse_cases(raw_data_file: str, source_id: str, source_url: str):\n with open(raw_data_file, \"r\") as f:\n reader = csv.DictReader(f, delimiter=\";\")\n for row in reader:\n if row[_CLASSIFICATION] == \"Confirmados\":\n try:\n case = {\n \"caseReference\": {\"sourceId\": source_id, \"sourceUrl\": source_url},\n \"location\": {\n \"query\": \", \".join(\n [row[_MUNICIPALITY], \"Espirito Santo\", \"Brazil\"]\n )\n },\n \"events\": convert_events(\n row[_DATE_CONFIRMED],\n row[_PCR],\n row[_SEROLOGICAL_METHOD1],\n row[_SEROLOGICAL_METHOD2],\n row[_SEROLOGICAL_METHOD3],\n row[_TEST_CLASS],\n row[_OUTCOME],\n row[_DATE_DEATH],\n row[_HOSPITALIZED]\n ),\n \"symptoms\": convert_symptoms(\n row[_HEADACHE],\n row[_SORE_THROAT],\n row[_FEVER],\n row[_COUGH],\n row[_DIARRHOEA],\n row[_DIFFICULTY_BREATHING]\n ),\n \"demographics\": convert_demographics(\n row[_GENDER], row[_AGE], row[_ETHNICITY], row[_HEALTHCARE_PROFESSIONAL]\n ),\n }\n case[\"preexistingConditions\"] = convert_preexisting_conditions(\n row[_LUNG], row[_KIDNEY], row[_DIABETES], row[_CARDIOVASCULAR], row[_OBESITY]\n )\n notes = convert_notes(\n row[_CORYZA],\n row[_SMOKER],\n row[_HOMELESS],\n row[_NEIGHBOURHOOD],\n row[_DISABILITY],\n row[_INTERNAL_TRAVEL],\n row[_INTERNATIONAL_TRAVEL],\n row[_SCHOOLING]\n )\n if notes:\n case[\"restrictedNotes\"] = notes\n yield case\n except ValueError as ve:\n raise ValueError(f\"error converting case: {ve}\")",
"def process_xlsx(content):\n data = {}\n workbook = xlrd.open_workbook(file_contents=content)\n worksheets = [w for w in workbook.sheet_names() if not w.startswith('_')]\n for worksheet_name in worksheets:\n if worksheet_name.startswith('_'):\n continue\n\n worksheet = workbook.sheet_by_name(worksheet_name)\n\n merged_cells = worksheet.merged_cells\n if len(merged_cells):\n raise MergedCellError(worksheet.name, merged_cells)\n\n worksheet.name = slughifi(worksheet.name)\n headers = make_headers(worksheet)\n worksheet_data = make_worksheet_data(headers, worksheet)\n data[worksheet.name] = worksheet_data\n return data",
"def process_data(self, excel_file, output_title):\n df = read_excel(excel_file)\n labels = df.columns.values.tolist()\n title = f\"{labels[1]} vs {labels[0]}\"\n data = []\n for label in labels:\n data.append(df[label].values.tolist())\n\n for callback in self.callbacks:\n callback(title, data, labels, output_title)",
"def get_xls(xls_name, sheet_name):\n cls = []\n # get xls file's path\n xlsPath = os.path.join(proDir, \"testFile\", 'case', xls_name)\n # open xls file\n file = open_workbook(xlsPath)\n # get sheet by name\n sheet = file.sheet_by_name(sheet_name)\n # get one sheet's rows\n nrows = sheet.nrows\n for i in range(nrows):\n if sheet.row_values(i)[0] != u'case_name':\n cls.append(sheet.row_values(i))\n return cls",
"def main():\n\n era = dt.datetime.now()\n\n parser = xlslisp_compile_argdoc()\n args = parser.parse_args()\n\n space = os.path.splitext(args.file)[0]\n\n # Import the Values of Sheets of one Xlsx File\n\n sheet_by_name = openpyxl.load_workbook(args.file, data_only=True)\n sheet_by_name_keys_list = sheet_by_name.sheetnames\n\n stderr_print(\n \"xlslisp: reading {} sheets from: {}\".format(\n len(sheet_by_name_keys_list), args.file\n )\n )\n\n # Option to quit early\n\n if not args.force:\n stderr_print(\n \"xlslisp.py: Xlsx imported, run again with --force to replace Csv's\"\n )\n\n sys.exit(1)\n\n # Visit each Sheet\n\n for (index, sheetname) in enumerate(sheet_by_name_keys_list):\n sheet = sheet_by_name[sheetname]\n\n csv_name = \"{space}-{dashed_sheet}.csv\".format(\n space=space, dashed_sheet=sheetname.replace(\" \", \"-\")\n ).lower()\n\n # Collect Rows of String Values\n\n csv_ragged_rows = list()\n for row_index in range(sheet.max_row):\n row_mark = 1 + row_index\n\n csv_cells = list()\n\n for col_index in range(sheet.max_column):\n cell = sheet.cell(1 + row_index, 1 + col_index)\n col_mark = cell.column_letter\n assert col_mark == excel_az_mark(col_index)\n\n if False:\n if (col_mark, row_mark) == (\"C\", 89):\n pdb.set_trace()\n\n csv_cells.append(cell.value)\n\n # Warn of trailing spaces\n\n if str(csv_cells[-1]).endswith(\" \"):\n stderr_print(\n \"xlslisp: Warning: \"\n \"could rstrip cell at: {!r}!{}{} {}\".format(\n sheetname, col_mark, row_mark, csv_cells[-1]\n )\n )\n\n csv_ragged_rows.append(csv_cells)\n\n # Format as rectangular Csv to please GitHub\n #\n # per GitHub > Rendering CSV and TSV data\n # flagging ragged as \"we can make this file beautiful and searchable\"\n #\n\n csv_rows = rows_complete(csv_ragged_rows, cell=None)\n\n charstream = io.StringIO()\n csv_writer = csv.writer(charstream)\n for csv_cells in csv_rows:\n csv_writer.writerow(csv_cells)\n\n charstream.seek(0)\n csv_chars = charstream.read()\n\n # Write the lines with local \"os.linesep\" line-ending's\n # specifically Not the mix of \"\\r\\n\" and \"\\n\" from multi-line Excel cells\n # but without rstrip'ping the lines # TODO: poor choice to skip rstrip?\n\n csv_lines = csv_chars.splitlines()\n csv_joined = \"\\n\".join(csv_lines) + \"\\n\"\n\n stderr_print(\n \"xlslisp: writing {} chars of {} rows to: {}\".format(\n len(csv_joined), sheet.max_row, csv_name\n )\n )\n\n with open(csv_name, \"w\") as csv_writing:\n csv_writing.write(csv_joined)\n\n now = dt.datetime.now()\n stderr_print(\"xlslisp: elapsed time of\", (now - era), \"since\", era)\n\n sys.exit(0)",
"def parse(instr):\n # TODO: a spreadsheet should really be an object and cellval a method\n global DATA, MESSAGES, CURRENT_ROW\n DATA = {}\n MESSAGES = []\n CURRENT_ROW = None\n\n updated = {}\n parse_gspreadsheet(instr, updated)\n # find header row: look for \"opportunity title\" (case insensitive)\n find_header_row('opportunity\\s*title')\n if not HEADER_ROW or not HEADER_STARTCOL:\n return len(MESSAGES), DATA, MESSAGES, [], []\n\n header_colidx = {}\n header_names = {}\n header_col = HEADER_STARTCOL\n while True:\n header_str = cellval(HEADER_ROW, header_col)\n if not header_str:\n break\n field_name = None\n header_str = header_str.lower()\n if header_str.find(\"title\") >= 0:\n field_name = \"OpportunityTitle\"\n elif header_str.find(\"organization\") >= 0 and \\\n header_str.find(\"sponsor\") >= 0:\n field_name = \"SponsoringOrganization\"\n elif header_str.find(\"description\") >= 0:\n field_name = \"Description\"\n elif header_str.find(\"skills\") >= 0:\n field_name = \"Skills\"\n elif header_str.find(\"location\") >= 0 and header_str.find(\"name\") >= 0:\n field_name = \"LocationName\"\n elif header_str.find(\"street\") >= 0:\n field_name = \"LocationStreet\"\n elif header_str.find(\"city\") >= 0:\n field_name = \"LocationCity\"\n elif header_str.find(\"state\") >= 0 or header_str.find(\"province\") >= 0:\n field_name = \"LocationProvince\"\n elif header_str.find(\"zip\") >= 0 or header_str.find(\"postal\") >= 0:\n field_name = \"LocationPostalCode\"\n elif header_str.find(\"country\") >= 0:\n field_name = \"LocationCountry\"\n elif header_str.find(\"start\") >= 0 and header_str.find(\"date\") >= 0:\n field_name = \"StartDate\"\n elif header_str.find(\"start\") >= 0 and header_str.find(\"time\") >= 0:\n field_name = \"StartTime\"\n elif header_str.find(\"end\") >= 0 and header_str.find(\"date\") >= 0:\n field_name = \"EndDate\"\n elif header_str.find(\"end\") >= 0 and header_str.find(\"time\") >= 0:\n field_name = \"EndTime\"\n elif header_str.find(\"contact\") >= 0 and header_str.find(\"name\") >= 0:\n field_name = \"ContactName\"\n elif header_str.find(\"email\") >= 0 or header_str.find(\"e-mail\") >= 0:\n field_name = \"ContactEmail\"\n elif header_str.find(\"phone\") >= 0:\n field_name = \"ContactPhone\"\n elif header_str.find(\"website\") >= 0 or header_str.find(\"url\") >= 0:\n field_name = \"URL\"\n elif header_str.find(\"often\") >= 0:\n field_name = \"Frequency\"\n elif header_str.find(\"days\") >= 0 and header_str.find(\"week\") >= 0:\n field_name = \"DaysOfWeek\"\n elif header_str.find(\"paid\") >= 0:\n field_name = \"Paid\"\n elif header_str.find(\"commitment\") >= 0 or header_str.find(\"hours\") >= 0:\n field_name = \"CommitmentHours\"\n elif header_str.find(\"age\") >= 0 and header_str.find(\"min\") >= 0:\n field_name = \"MinimumAge\"\n elif header_str.find(\"kid\") >= 0:\n field_name = \"KidFriendly\"\n elif header_str.find(\"senior\") >= 0 and header_str.find(\"only\") >= 0:\n field_name = \"SeniorsOnly\"\n elif header_str.find(\"sex\") >= 0 or header_str.find(\"gender\") >= 0:\n field_name = \"SexRestrictedTo\"\n elif header_str.find(\"volunteer appeal\") >= 0:\n field_name = None\n else:\n parser_error(\"couldn't map header '\"+header_str+\"' to a field name.\")\n if field_name != None:\n header_colidx[field_name] = header_col\n header_names[header_col] = field_name\n #print header_str, \"=>\", field_name\n header_col += 1\n\n if len(header_names) < 10:\n parser_error(\"too few fields found: \"+str(len(header_names)))\n\n data_startrow = 0\n\n # check to see if 
there's a header-description row\n header_desc = cellval(HEADER_ROW+1, HEADER_STARTCOL)\n if not header_desc:\n parser_error(\"blank row not allowed below header row\")\n else:\n header_desc = header_desc.lower()\n data_startrow = HEADER_ROW + 1\n if header_desc.find(\"Opportunity Title\") >= 0:\n data_startrow += 1\n\n\n # find the data\n CURRENT_ROW = data_startrow\n blankrows = 0\n numopps = 0\n addr_ar = []\n urls_ar = []\n\n if data_startrow == 0:\n MESSAGES = []\n MESSAGES.append('Spreadsheet appears to be empty')\n return len(MESSAGES), DATA, MESSAGES, [], []\n\n while True:\n blankrow = True\n #rowstr = \"row=\"+str(row)+\"\\n\"\n record = {}\n record['LastUpdated'] = '0000-00-00'\n for field_name in header_colidx:\n col = header_colidx[field_name]\n val = cellval(CURRENT_ROW, col)\n if val:\n blankrow = False\n else:\n val = \"\"\n #rowstr += \" \"+field_name+\"=\"+val+\"\\n\"\n record[field_name] = val\n key = 'R'+str(CURRENT_ROW)+'C'+str(col)\n if (key in updated and\n updated[key] > record['LastUpdated']):\n record['LastUpdated'] = updated[key]\n if blankrow:\n blankrows += 1\n if blankrows > MAX_BLANKROWS:\n break\n else:\n numopps += 1\n blankrows = 0\n record['oppid'] = str(numopps)\n get_minmaxlen(record, 'OpportunityTitle', 4, 100)\n get_minmaxlen(record, 'SpecialSkills', -1, 1000)\n location_name = get_minmaxlen(record, 'LocationName', 4)\n if location_name.lower() == \"virtual\":\n is_virtual = True\n else:\n is_virtual = False\n\n if is_virtual:\n reason = \" for virtual opportunities-- if you want both a location and\"\n reason += \" a virtual opportunity, then provide two separate records.\"\n get_blank(record, \"LocationStreet\", reason)\n get_blank(record, \"LocationCity\", reason)\n get_blank(record, \"LocationProvince\", reason)\n get_blank(record, \"LocationPostalCode\", reason)\n get_blank(record, \"LocationCountry\", reason)\n else:\n # TODO: appengine 30sec timeouts render this ambiguous/confuse for users\n addr = recordval(record, \"LocationStreet\")\n addr += \" \"+recordval(record, \"LocationCity\")\n addr += \" \"+recordval(record, \"LocationProvince\")\n addr += \" \"+recordval(record, \"LocationPostalCode\")\n addr += \" \"+recordval(record, \"LocationCountry\")\n addr_ar.append(addr)\n \n start_date = recordval(record, \"StartDate\").lower()\n if start_date == \"ongoing\":\n ongoing = True\n elif start_date == \"\":\n parser_error(\"Start Date may not be blank.\")\n ongoing = True\n else:\n ongoing = False\n\n if ongoing:\n start_time = recordval(record, \"StartTime\")\n if start_time != \"\" and start_time != \"ongoing\":\n parser_error(\"ongoing event should have blank Start Time.\")\n end_date = recordval(record, \"EndDate\")\n if end_date != \"\" and end_date != \"ongoing\":\n parser_error(\"ongoing event should have blank End Date.\")\n end_time = recordval(record, \"EndTime\")\n if end_time != \"\" and end_time != \"ongoing\":\n parser_error(\"ongoing event should have blank End Time.\")\n else:\n get_dtval(record, \"StartDate\")\n get_tmval(record, \"StartTime\")\n get_dtval(record, \"EndDate\")\n get_tmval(record, \"EndTime\")\n\n email = recordval(record, \"ContactEmail\")\n if email != \"\" and email.find(\"@\") == -1:\n parser_error(\"malformed email address: \"+email)\n\n url = recordval(record, \"URL\")\n if url == \"\":\n parser_error(\"Website - this field is required.\")\n urls_ar.append(url)\n \n daysofweek = recordval(record, \"DaysOfWeek\").split(\",\")\n for dow in daysofweek:\n lcdow = dow.strip().lower()\n if lcdow not in [\"sat\", 
\"saturday\",\n \"sun\", \"sunday\",\n \"mon\", \"monday\",\n \"tue\", \"tues\", \"tuesday\",\n \"wed\", \"weds\", \"wednesday\",\n \"thu\", \"thur\", \"thurs\", \"thursday\",\n \"fri\", \"friday\", \"\"]:\n # TODO: support these alternates in the datahub!\n parser_error(\"malformed day of week: '%s'\" % dow)\n get_boolval(record, \"Paid\")\n get_intval(record, \"CommitmentHours\")\n get_intval(record, \"MinimumAge\")\n get_boolval(record, \"KidFriendly\")\n get_boolval(record, \"SeniorsOnly\")\n sexrestrict = recordval(record, \"SexRestrictedTo\")\n if sexrestrict.lower() not in [\"women\", \"men\", \"either\", \"\"]:\n parser_error(\"bad SexRestrictedTo-- try Men, Women, Either or (blank).\")\n org = recordval(record, 'SponsoringOrganization')\n if org == \"\":\n parser_error(\"missing Sponsoring Organization-- this field is required.\"+\n \" (it can be an informal name, or even a person's name).\")\n else:\n get_minmaxlen(record, 'SponsoringOrganization', 4, 100)\n get_minmaxlen(record, 'Description', 15, 3000)\n freq = recordval(record, 'Frequency').lower()\n if not (freq == \"\" or freq == \"once\" or freq == \"daily\" or\n freq == \"weekly\" or freq == \"every other week\" or \n freq == \"monthly\"):\n parser_error(\"unsupported frequency: '\"+\n recordval(record, 'Frequency')+\"'\")\n CURRENT_ROW += 1\n\n return len(MESSAGES), DATA, MESSAGES, addr_ar, urls_ar",
"def setUp(self):\n wb = open_workbook(filename=self.filename)\n\n self.port_values = {}\n\n # find sheets that contain cash\n sheet_names = wb.sheet_names()\n for sn in sheet_names:\n if len(sn) > 4 and sn[-4:] == '-BOC':\n # print('read from sheet {0}'.format(sn))\n ws = wb.sheet_by_name(sn)\n read_cash(ws, self.port_values)",
"def test_from_file_xls(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xls')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )",
"def main(xls, seqtype):\n data_extraction = {}\n # 1 - Load data\n logger.info(f'Load {xls}')\n manifest, metadata = load_xls(xls)\n # 2 - Check file and data\n logger.info(f'Start to validate XLS')\n check_samples(manifest, metadata)\n check_seqtype(manifest, seqtype)\n check_metadata(metadata, seqtype)\n logger.success(f'Successfully validate XLS')\n # 3 - Export XLS to TSV for Qiime2\n logger.info(f'Start to export XLS to TSV')\n data_extraction = extract_manifest(manifest, seqtype, data_extraction)\n data_extraction, metadata_vars = extract_metadata(metadata, seqtype, data_extraction)\n export_to_tsv_for_qiime(data_extraction, metadata_vars, seqtype)\n logger.success(f'Done')"
]
| [
"0.66888124",
"0.6109369",
"0.61021197",
"0.60966265",
"0.5916426",
"0.589535",
"0.58326924",
"0.5774381",
"0.56844836",
"0.5633092",
"0.5630392",
"0.56159174",
"0.56018525",
"0.55881256",
"0.55868655",
"0.55625945",
"0.55516446",
"0.55497164",
"0.5522616",
"0.55121005",
"0.5487045",
"0.5447336",
"0.5432209",
"0.54293597",
"0.5405304",
"0.5398603",
"0.5397802",
"0.53861046",
"0.53714776",
"0.53620636"
]
| 0.67918974 | 0 |
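Editorial note: the `manual_case_parser` record above chains project-specific helpers (`excel_parser`, `process_module_field`, ...). As a hedged illustration of the loading step only, case rows can be read from a sheet with pandas; the function name and the assumption that one row equals one case are not the original project's schema.

```python
# Load test-case rows from an Excel sheet into a list of dicts (illustrative only).
import pandas as pd

def load_cases(path: str):
    df = pd.read_excel(path, dtype=str).fillna("")  # keep every cell as text
    return df.to_dict(orient="records")             # one dict per case row
```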
Add the "ClassName" filed. | def add_class_name_field(data):
for case in data:
case_id = case['No.']
filed_list = case_id.split('_')
filed_list = list(map(lambda x: x.title(), filed_list))
case['ClassName'] = ''.join(filed_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setClassName(self,whichDir):\n self.className = whichDir\n self.setClassMap({whichDir : 1})",
"def addClassRef(clazz):\n\n global h_classes\n header = \"class %s;\" % clazz\n if not header in h_classes:\n h_classes.append(header)",
"def add_class(self, name):\n if name is not None and not self.has_class(name):\n self._cached_class.append(name)\n self._update_class()",
"def add_class(self, period_num, class_name):\n self._classes[period_num] = class_name",
"def save_class(self, a, class_name):\n logging.debug(\"in save class \" + class_name)\n self.produce(\"class_name\", class_name)\n self.classes.append(class_name)\n self.begin('')",
"def add_class(self, new_class):\n index = self._counter\n self._counter += 1\n for element in new_class:\n self._class_names[element] = index\n node = self.part[index].append(element)\n self._place[element] = node",
"def add_class(self, cls):\n self.commands.append(cls)",
"def add_class(self, klass):\n if not issubclass(klass, DataClayObject):\n raise DataClayException(\"Can only use DataClayObject classes\")\n\n logger.debug(\"Adding class %s to the MetaClassFactory\", klass)\n class_container = klass._prepare_metaclass(self._namespace, self._responsible_account)\n\n # Save to the list, and bookmark the MetaClass\n # (for valid recursive behaviour, e.g. cycles)\n complete_name = class_container.name\n logger.debug(\"[add_class] Using `%s` as `name` field of Type\", complete_name)\n if complete_name not in self.types:\n self.types[complete_name] = UserType(\n signature=\"L{};\".format(complete_name).replace(\".\", \"/\"),\n includes=[],\n namespace=self._namespace,\n typeName=complete_name,\n )\n self.classes.append(class_container)\n\n parent = klass.__bases__[0]\n if parent is not DataClayObject:\n self.add_class(parent)\n\n logger.debug(\"Class %s finished\", class_container.name)",
"def update_css_class(kwargs, class_name):\n if \"className\" in kwargs:\n kwargs[\"className\"] += f\" {class_name}\"\n else:\n kwargs[\"className\"] = class_name",
"def add_class_to_ont(self, ID):\n self.ont.addClass(ID)\n self.ont.label(ID, self.label)\n self.ont.annotation(ID, 'IAO_0000115', self.definition)\n if 'equivalentTo' in self.pattern:\n self.ont.equivalentClasses(ID, self.equivalentTo)\n if 'subClassOf' in self.pattern:\n self.ont.subClassOf(ID, self.subClassOf)\n # Don't currently have means to generate GCIs!",
"def new_class(self, grp, name, class_type=\"NXcollection\"):\n sub = grp.require_group(name)\n sub.attrs[\"NX_class\"] = numpy.string_(class_type)\n return sub",
"def set_name(self, name):\n self.class_name = name",
"def write_class_name(annotation_file_path, class_name):\n root = etree.parse(annotation_file_path)\n objects = root.findall('object')\n \n for item in objects:\n name = item.find('name')\n name.text = class_name\n\n root.write(annotation_file_path, pretty_print=True)",
"def add_sclass(self, w: Wrapper, prop: Any) -> None:\n if not prop:\n return\n props = self.sclasses(w)\n if isinstance(prop, str):\n props.append(prop)\n else:\n props.extend(prop)\n self.set_sclasses(w, props)",
"def __set_name__(self, cls, name):\n pass",
"def register_class(self, cls, *, name=None):\n cls_name = self.host.cache_class(cls, name)\n self.register_constant(cls, cls_name)",
"def _add_class_to_map(self, model_key, class_dict):\n # Extract the class name.\n class_name = class_dict['class']\n\n try:\n # Attempt to access this class by name in the map.\n self.model_map['class'][class_name]\n except KeyError:\n # Class object does not exist. Map it.\n self.model_map['class'][class_name] = [model_key, class_dict]\n else:\n # This class name already exists, which will lead to\n # duplicates and failure.\n raise ItemExistsError('Class {} already exists in the map.'\n .format(class_name))",
"def CSSClasses(self):",
"def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)",
"def visit_class(self, flags, scope, token, parent):\r\n\r\n # define the class name in the current scope\r\n # see visit_block\r\n #scope.define(SC_FUNCTION, token.children[0])\r\n scope.defer(token)",
"def contribute_to_class(self, cls, name):\n\n super(StdImageField, self).contribute_to_class(cls, name)\n signals.post_save.connect(self._rename_resize_image, sender=cls)\n signals.post_init.connect(self.set_variations, sender=cls)",
"def class_name(name: str) -> str:\n return text.pascal_case(utils.safe_snake(name, \"type\"))",
"def add_to_class(cls, name, value):\n if hasattr(value, 'contribute_to_class'):\n value.contribute_to_class(cls, name)\n if not name.startswith('_'):\n cls._fields[name] = value\n else:\n setattr(cls, name, value)",
"def __create_classname(self, fullname):\n return PACKAGE_NAME + \".\" + fullname",
"def set_class_list(self, L):\n\t\tself.class_list = L",
"def declare_class_property(self, name):\n\n self.objc_class.forced_properties.add(name)",
"def add_class_to_widget(widget, *css_classes):\n css_string = \" \".join(css_classes)\n if 'class' in widget.attrs:\n widget.attrs['class'] += ' {} '.format(css_string)\n else:\n widget.attrs['class'] = css_string",
"def bind_class(self, className, sequence=None, func=None, add=None):\n return super().bind_class(className, sequence, func, add)",
"def register(cls, class_):\n cls._registered[class_.tag()] = class_",
"def setup_class(klass):"
]
| [
"0.6673873",
"0.65873694",
"0.65540683",
"0.65310895",
"0.62807566",
"0.61844677",
"0.61496955",
"0.61109734",
"0.60883236",
"0.5851665",
"0.5794981",
"0.57527643",
"0.57015514",
"0.56917053",
"0.5677711",
"0.5670707",
"0.566091",
"0.56086993",
"0.55906165",
"0.55896807",
"0.55784124",
"0.55692834",
"0.5566955",
"0.55630785",
"0.5561454",
"0.5515173",
"0.5503372",
"0.5483222",
"0.54812634",
"0.54811925"
]
| 0.69511896 | 0 |
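Editorial note: the `add_class_name_field` record above derives a PascalCase class name from a snake_case case id. A minimal stand-alone version of that transform; the function name and sample id are made up.

```python
def to_class_name(case_id: str) -> str:
    # "login_with_email" -> "LoginWithEmail", as in the record above
    return "".join(part.title() for part in case_id.split("_"))

print(to_class_name("login_with_email"))  # LoginWithEmail
```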
Validation of the implementation of periodic angle axis in Magnetic (MagFEMM) and Force (ForceMT) modules | def test_FEMM_periodicity_angle():
SPMSM_015 = load(join(DATA_DIR, "Machine", "SPMSM_015.json"))
assert SPMSM_015.comp_periodicity() == (9, False, 9, True)
simu = Simu1(name="test_FEMM_periodicity_angle", machine=SPMSM_015)
# Definition of the enforced output of the electrical module
I0_rms = 250 / sqrt(2)
Phi0 = 140 * pi / 180 # Maximum Torque Per Amp
Id_ref = (I0_rms * exp(1j * Phi0)).real
Iq_ref = (I0_rms * exp(1j * Phi0)).imag
simu.input = InputCurrent(
Id_ref=Id_ref,
Iq_ref=Iq_ref,
Na_tot=252 * 9,
Nt_tot=4 * 9,
N0=1000,
)
# Definition of the magnetic simulation: with periodicity
simu.mag = MagFEMM(
type_BH_stator=1,
type_BH_rotor=1,
is_periodicity_a=True,
is_periodicity_t=False,
nb_worker=cpu_count(),
Kmesh_fineness=2,
)
simu.force = ForceMT()
# Definition of the magnetic simulation: no periodicity
simu2 = simu.copy()
simu2.mag.is_periodicity_a = False
simu2.force = ForceMT()
# Run simulations
out = Output(simu=simu)
simu.run()
out2 = Output(simu=simu2)
simu2.run()
# Plot the result
out.mag.B.plot_2D_Data(
"time",
"angle[0]{°}",
data_list=[out2.mag.B],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_B_time.png"),
is_show_fig=False,
**dict_2D
)
out.mag.B.plot_2D_Data(
"angle{°}",
"time[1]",
data_list=[out2.mag.B],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_B_space.png"),
is_show_fig=False,
**dict_2D
)
out.force.AGSF.plot_2D_Data(
"wavenumber=[0,100]",
"time[0]",
data_list=[out2.force.AGSF],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_P_space_fft.png"),
is_show_fig=False,
**dict_2D
)
out.force.AGSF.plot_2D_Data(
"freqs",
"angle[0]",
data_list=[out2.force.AGSF],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_P_fft2.png"),
is_show_fig=False,
**dict_2D
)
out.mag.Tem.plot_2D_Data(
"time",
data_list=[out2.mag.Tem],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_Tem_time.png"),
is_show_fig=False,
**dict_2D
)
out.mag.Phi_wind_stator.plot_2D_Data(
"time",
"phase",
data_list=[out2.mag.Phi_wind_stator],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_Phi_wind_stator_time.png"),
is_show_fig=False,
**dict_2D
)
# Compare both simu
Bflux = out.mag.B
arg_list = ["angle"]
result = Bflux.get_rphiz_along(*arg_list)
Brad = result["radial"]
angle = result["angle"]
Bflux2 = out2.mag.B
arg_list = ["angle"]
result2 = Bflux2.get_rphiz_along(*arg_list)
Brad2 = result2["radial"]
assert_array_almost_equal(Brad, Brad2, decimal=1)
return out, out2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_mag_form_fac_case1():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac()[0], ion.calc_mag_form_fac()[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)",
"def test_mag_form_fac():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(q=1.)[0], ion.calc_mag_form_fac(q=1.)[1:]\n del _temp\n assert (abs(formfac - 0.932565) < 1e-6)",
"def test_mag_form_fac_case2():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(qrange=[0, 2])[0], ion.calc_mag_form_fac(qrange=[0, 2])[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)",
"def test_library_charges_monatomic_ions(self, monatomic_ion, formal_charge):\n ff = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"),\n get_data_file_path(\"test_forcefields/ion_charges.offxml\"),\n )\n mol = Molecule.from_smiles(\"[{}]\".format(monatomic_ion))\n omm_system = ff.create_openmm_system(mol.to_topology())\n\n nonbondedForce = [\n f for f in omm_system.getForces() if type(f) == NonbondedForce\n ][0]\n q, _, _ = nonbondedForce.getParticleParameters(0)\n assert q == formal_charge",
"def test_FEMM_periodicity_time_no_periodicity_a():\n\n SPMSM_015 = load(join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"))\n\n assert SPMSM_015.comp_periodicity() == (9, False, 9, True)\n\n simu = Simu1(name=\"test_FEMM_periodicity_time_no_periodicity_a\", machine=SPMSM_015)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 9,\n Nt_tot=4 * 9,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=False,\n is_periodicity_t=True,\n nb_worker=cpu_count(),\n Kmesh_fineness=2,\n )\n simu.force = ForceMT()\n\n # Definition of the magnetic simulation: no periodicity\n simu2 = simu.copy()\n simu2.mag.is_periodicity_t = False\n\n # Run simulations\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu2)\n simu2.run()\n\n # Plot the result\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[1]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber=[0,100]\",\n \"time[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_space_fft.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"freqs\",\n \"angle[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_fft2.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Tem.plot_2D_Data(\n \"time\",\n data_list=[out2.mag.Tem],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Tem_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Phi_wind_stator.plot_2D_Data(\n \"time\",\n \"phase\",\n data_list=[out2.mag.Phi_wind_stator],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Phi_wind_stator_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n Bflux = out.mag.B\n arg_list = [\"time\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n time = result[\"time\"]\n\n Bflux2 = out2.mag.B\n arg_list = [\"time\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n time2 = result2[\"time\"]\n\n # Compare both simu\n assert_array_almost_equal((Brad - Brad2) / Brad2, 0, decimal=2)\n assert_array_almost_equal(time, time2, decimal=6)\n\n AGSF = out.force.AGSF\n arg_list = [\"time\"]\n result_AGSF = AGSF.get_rphiz_along(*arg_list)\n Prad = result_AGSF[\"radial\"]\n time3 = result_AGSF[\"time\"]\n\n AGSF2 = out2.force.AGSF\n arg_list = [\"time\"]\n result_AGSF2 = AGSF2.get_rphiz_along(*arg_list)\n Prad2 = result_AGSF2[\"radial\"]\n time4 = result_AGSF2[\"time\"]\n\n # Compare both simu\n assert_array_almost_equal((Prad - Prad2) / Prad2, 0, decimal=2)\n assert_array_almost_equal(time3, time4, decimal=6)\n\n return out, out2",
"def check(self, X):\n if (np.min(X) < -90.) or (np.max(X) > 90.):\n print \"Warning: X may be defined in degrees instead of radians\"",
"def check_force_parameters(self, skipImpropers = False):\n self.check_bonds(self.bond_force0, self.bond_force1)\n self.check_angles(self.angle_force0, self.angle_force1)\n self.check_nonbonded(self.nonbonded_force0, self.nonbonded_force1)\n self.check_proper_torsions(self.torsion_force0, self.torsion_force1, self.bond_force0, self.bond_force1)\n if not skipImpropers:\n self.check_improper_torsions(self.torsion_force0, self.torsion_force1, self.bond_force0, self.bond_force1)\n logger.info(\"Note: skipping degenerate impropers with < 4 atoms.\")",
"def test_assign_charges_using_library_charges_by_single_atoms(self):\n ff = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"),\n xml_ethanol_library_charges_by_atom_ff,\n )\n\n molecules = [\n Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\")),\n Molecule.from_file(get_data_file_path(\"molecules/ethanol_reordered.sdf\")),\n ]\n top = Topology.from_molecules(molecules)\n omm_system = ff.create_openmm_system(top)\n nonbondedForce = [\n f for f in omm_system.getForces() if type(f) == NonbondedForce\n ][0]\n expected_charges = [\n -0.2,\n -0.1,\n 0.3,\n 0.08,\n -0.02,\n -0.02,\n -0.02,\n -0.01,\n -0.01,\n -0.2,\n 0.3,\n -0.1,\n 0.08,\n -0.02,\n -0.02,\n -0.02,\n -0.01,\n -0.01,\n ] * openmm_unit.elementary_charge\n for particle_index, expected_charge in enumerate(expected_charges):\n q, _, _ = nonbondedForce.getParticleParameters(particle_index)\n assert q == expected_charge",
"def test_check_compact_axis_angle():\n a_list = [0, 0, 0]\n a = pr.check_compact_axis_angle(a_list)\n assert_array_almost_equal(a_list, a)\n assert_equal(type(a), np.ndarray)\n assert_equal(a.dtype, np.float64)\n\n random_state = np.random.RandomState(0)\n a = pr.norm_vector(pr.random_vector(random_state, 3))\n a *= np.pi + random_state.randn() * 4.0 * np.pi\n a2 = pr.check_compact_axis_angle(a)\n pr.assert_compact_axis_angle_equal(a, a2)\n assert_greater(np.linalg.norm(a2), 0)\n assert_greater(np.pi, np.linalg.norm(a2))\n\n assert_raises_regexp(\n ValueError, \"Expected axis and angle in array with shape\",\n pr.check_compact_axis_angle, np.zeros(4))\n assert_raises_regexp(\n ValueError, \"Expected axis and angle in array with shape\",\n pr.check_compact_axis_angle, np.zeros((3, 3)))",
"def test_FEMM_periodicity_time():\n\n SPMSM_015 = load(join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"))\n\n assert SPMSM_015.comp_periodicity() == (9, False, 9, True)\n\n simu = Simu1(name=\"test_FEMM_periodicity_time\", machine=SPMSM_015)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 9,\n Nt_tot=4 * 9,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=True,\n is_periodicity_t=True,\n nb_worker=cpu_count(),\n Kmesh_fineness=2,\n )\n simu.force = ForceMT()\n\n # Definition of the magnetic simulation: no periodicity\n simu2 = simu.copy()\n simu2.mag.is_periodicity_t = False\n\n # Run simulations\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu2)\n simu2.run()\n\n # Plot the result\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[1]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber=[0,100]\",\n \"time[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_space_fft.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"freqs\",\n \"angle[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_fft2.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Tem.plot_2D_Data(\n \"time\",\n data_list=[out2.mag.Tem],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Tem_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Phi_wind_stator.plot_2D_Data(\n \"time\",\n \"phase\",\n data_list=[out2.mag.Phi_wind_stator],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Phi_wind_stator_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n # Compare both simu with B\n Bflux = out.mag.B\n arg_list = [\"time\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n time = result[\"time\"]\n\n # Check Flux spatio-temporal reconstruction full\n Bflux2 = out2.mag.B\n arg_list = [\"time\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n time = result2[\"time\"]\n\n assert_array_almost_equal(Brad, Brad2, decimal=2)\n\n # Compare both simu with AGSF\n AGSF = out.force.AGSF\n arg_list = [\"time\"]\n result_AGSF = AGSF.get_rphiz_along(*arg_list)\n Prad = result_AGSF[\"radial\"]\n time = result_AGSF[\"time\"]\n\n AGSF2 = out2.force.AGSF\n arg_list = [\"time\"]\n result_AGSF2 = AGSF2.get_rphiz_along(*arg_list)\n Prad2 = result_AGSF2[\"radial\"]\n time = result_AGSF2[\"time\"]\n\n assert_array_almost_equal(Prad / 1000, Prad2 / 1000, decimal=0)\n\n return out, out2",
"def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))",
"def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 = self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 + e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: (1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: -(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n [self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])",
"def test_library_charges_dont_parameterize_molecule_because_of_incomplete_coverage(\n self,\n ):\n molecules = [Molecule.from_file(get_data_file_path(\"molecules/toluene.sdf\"))]\n top = Topology.from_molecules(molecules)\n\n # The library charges in the FF should not be able to fully cover toluene\n ff = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"),\n xml_ethanol_library_charges_by_atom_ff,\n )\n # Delete the ToolkitAM1BCCHandler so the molecule won't get charges from anywhere\n del ff._parameter_handlers[\"ToolkitAM1BCC\"]\n with pytest.raises(\n RuntimeError, match=\"Cc1ccccc1 could not be fully assigned charges\"\n ):\n omm_system = ff.create_openmm_system(top)\n\n # If we do NOT delete the ToolkiAM1BCCHandler, then toluene should be assigned some nonzero partial charges.\n # The exact value will vary by toolkit, so we don't test that here.\n ff = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"),\n xml_ethanol_library_charges_by_atom_ff,\n )\n omm_system = ff.create_openmm_system(top)\n nonbondedForce = [\n f for f in omm_system.getForces() if type(f) == NonbondedForce\n ][0]\n for particle_index in range(top.n_atoms):\n q, _, _ = nonbondedForce.getParticleParameters(particle_index)\n assert q != 0 * unit.elementary_charge",
"def is_valid_angle(self, theta_step):\n return not (self.angle % theta_step)",
"def test_fractional_bondorder_invalid_interpolation_method(self):\n mol = create_ethanol()\n\n forcefield = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"), xml_ff_bo\n )\n forcefield.get_parameter_handler(\n \"ProperTorsions\"\n )._fractional_bondorder_interpolation = \"invalid method name\"\n topology = Topology.from_molecules([mol])\n\n # If important, this can be a custom exception instead of a verbose ValidationError\n with pytest.raises(\n ValidationError,\n match=\"given=invalid method name\",\n ):\n forcefield.create_openmm_system(\n topology,\n charge_from_molecules=[mol],\n )",
"def f_theta_omega(angles, times):\n # gravity = 9.81\n # arm_length = 0.1 \n\n # omega = angles[0]\n # theta = angles[1]\n # ftheta = omega\n # fomega = -(gravity/arm_length) * np.sin(theta)\n pass",
"def is_pure_magnetic(self):\n klist = [key for key in self.components if not self.component_is_zero(key)]\n return all([key.startswith('magnetic') for key in klist])",
"def test_charge_increment_model_forward_and_reverse_ethanol(self):\n file_path = get_data_file_path(\"test_forcefields/test_forcefield.offxml\")\n ff = ForceField(file_path, xml_charge_increment_model_ff_ethanol)\n del ff._parameter_handlers[\"ToolkitAM1BCC\"]\n top = Topology.from_molecules([create_ethanol(), create_reversed_ethanol()])\n sys = ff.create_openmm_system(top)\n nonbonded_force = [\n force\n for force in sys.getForces()\n if isinstance(force, openmm.NonbondedForce)\n ][0]\n expected_charges = [\n 0.2,\n -0.15,\n -0.05,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n -0.05,\n -0.15,\n 0.2,\n ] * openmm_unit.elementary_charge\n for idx, expected_charge in enumerate(expected_charges):\n charge, _, _ = nonbonded_force.getParticleParameters(idx)\n assert (\n abs(charge - expected_charge) < 1.0e-6 * openmm_unit.elementary_charge\n )",
"def test_float_input_angles(self):\n decomposer = OneQubitEulerDecomposer(\"PSX\")\n input_matrix = np.array(\n [\n [0.70710678, 0.70710678],\n [0.70710678, -0.70710678],\n ],\n dtype=np.float64,\n )\n (theta, phi, lam) = decomposer.angles(input_matrix)\n expected_theta = 1.5707963267948966\n expected_phi = 0.0\n expected_lam = 3.141592653589793\n self.assertAlmostEqual(theta, expected_theta)\n self.assertAlmostEqual(phi, expected_phi)\n self.assertAlmostEqual(lam, expected_lam)",
"def test_check_axis_angle():\n a_list = [1, 0, 0, 0]\n a = pr.check_axis_angle(a_list)\n assert_array_almost_equal(a_list, a)\n assert_equal(type(a), np.ndarray)\n assert_equal(a.dtype, np.float64)\n\n random_state = np.random.RandomState(0)\n a = np.empty(4)\n a[:3] = pr.random_vector(random_state, 3)\n a[3] = random_state.randn() * 4.0 * np.pi\n a2 = pr.check_axis_angle(a)\n pr.assert_axis_angle_equal(a, a2)\n assert_almost_equal(np.linalg.norm(a2[:3]), 1.0)\n assert_greater(a2[3], 0)\n assert_greater(np.pi, a2[3])\n\n assert_raises_regexp(\n ValueError, \"Expected axis and angle in array with shape\",\n pr.check_axis_angle, np.zeros(3))\n assert_raises_regexp(\n ValueError, \"Expected axis and angle in array with shape\",\n pr.check_axis_angle, np.zeros((3, 3)))",
"def test_float_input_angles_and_phase(self):\n decomposer = OneQubitEulerDecomposer(\"PSX\")\n input_matrix = np.array(\n [\n [0.70710678, 0.70710678],\n [0.70710678, -0.70710678],\n ],\n dtype=np.float64,\n )\n (theta, phi, lam, gamma) = decomposer.angles_and_phase(input_matrix)\n expected_theta = 1.5707963267948966\n expected_phi = 0.0\n expected_lam = 3.141592653589793\n expected_gamma = -0.7853981633974483\n self.assertAlmostEqual(theta, expected_theta)\n self.assertAlmostEqual(phi, expected_phi)\n self.assertAlmostEqual(lam, expected_lam)\n self.assertAlmostEqual(gamma, expected_gamma)",
"def check_nonbonded(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.NonbondedForce, \"Error: forces must be NonbondedForces\"\n assert force0.getNumParticles() == force1.getNumParticles(), \"Error: Systems have %d and %d particles in NonbondedForce, respectively.\" % (force0.getNumParticles(), force1.getNumParticles())\n\n n_atoms = force0.getNumParticles()\n\n q, sigma, epsilon = force0.getParticleParameters(0)\n #unit_q, unit_sigma, unit_epsilon = q.unit, sigma.unit, epsilon.unit\n unit_q = u.elementary_charge\n unit_sigma = u.angstrom\n unit_epsilon = u.kilojoule_per_mole\n\n for k in range(n_atoms):\n q0, sigma0, epsilon0 = force0.getParticleParameters(k)\n q1, sigma1, epsilon1 = force1.getParticleParameters(k)\n\n q0, sigma0, epsilon0 = q0 / unit_q, sigma0 / unit_sigma, epsilon0 / unit_epsilon\n q1, sigma1, epsilon1 = q1 / unit_q, sigma1 / unit_sigma, epsilon1 / unit_epsilon\n\n assert compare(q0, q1), \"Error: Particle %d has charges of %f and %f, respectively.\" % (k, q0, q1)\n\n if epsilon0 != 0.:\n assert compare(sigma0, sigma1), \"Error: Particle %d has sigma of %f and %f angstroms, respectively.\" % (k, sigma0, sigma1)\n else:\n logger.info(\"Skipping comparison of sigma (%f, %f) on particle %d because epsilon has values %f, %f kJ/mol\" % (sigma0, sigma1, k, epsilon0, epsilon1))\n\n assert compare(epsilon0, epsilon1), \"Error: Particle %d has epsilon of %f and %f kJ/mol, respectively.\" % (k, epsilon0, epsilon1)\n\n n_exceptions = force0.getNumExceptions()\n assert force0.getNumExceptions() == force1.getNumExceptions(), \"Error: Systems have %d and %d exceptions in NonbondedForce, respectively.\" % (force0.getNumExceptions(), force1.getNumExceptions())\n\n i0, i1, qq, sigma, epsilon = force0.getExceptionParameters(0)\n unit_qq = u.elementary_charge**2\n unit_sigma = u.angstrom\n unit_epsilon = u.kilojoule_per_mole\n\n dict0, dict1 = {}, {}\n for k in range(n_exceptions):\n i0, i1, qq, sigma, epsilon = force0.getExceptionParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n dict0[i0, i1] = ((qq / unit_qq, sigma / unit_sigma, epsilon / unit_epsilon))\n\n i0, i1, qq, sigma, epsilon = force1.getExceptionParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n dict1[i0, i1] = ((qq / unit_qq, sigma / unit_sigma, epsilon / unit_epsilon))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Exceptions0 - Exceptions1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Exceptions1 - Exceptions0 = %s\" % (keys1.difference(keys0)))\n assert set(dict0.keys()) == set(dict1.keys()), \"Systems have different NonBondedForce Exceptions\"\n\n for k, parameter_name in enumerate([\"qq\", \"sigma\", \"epsilon\"]):\n for (i0, i1) in dict0.keys():\n val0 = dict0[i0, i1][k]\n val1 = dict1[i0, i1][k]\n if parameter_name == \"sigma\" and dict0[i0, i1][2] == 0.0 and dict1[i0, i1][2] == 0.0:\n continue # If both epsilon parameters are zero, then sigma doesn't matter so skip the comparison.\n if parameter_name ==\"sigma\":\n assert compare(val0, val1), \"Error: NonBondedForce Exception, atom (%d, %d) has sigma values of %f and %f angstroms, respectively.\" % (i0, i1, parameter_name, val0, val1)\n elif parameter_name==\"qq\":\n assert compare(val0, val1), \"Error: NonBondedForce Exception atom (%d, %d) has squared charge values of %f and %f (elementary charge)**2, respectively.\" % (i0, i1, val0, val1)\n else:\n assert compare(val0, val1), \"Error: NonBondedForce Exception, atom (%d, %d) has epsilon values 
of %f and %f kJ/mol, respectively.\" % (i0, i1, val0, val1)",
"def comp_angle_opening_magnet(self):\n\n if self.W1 > 0:\n Rbo = self.get_Rbo()\n return float(2 * arcsin(self.W1 / (2 * Rbo)))\n else:\n return self.comp_angle_magnet()",
"def check_angles(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicAngleForce, \"Error: forces must be HarmonicAngleForces\"\n\n n_angles0 = force0.getNumAngles()\n n_angles1 = force1.getNumAngles()\n\n dict0, dict1 = {}, {}\n\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(0)\n #unit_theta = theta0.unit\n unit_theta = u.degrees\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.degrees)**2\n\n for k in range(n_angles0):\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict0[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n for k in range(n_angles1):\n i0, i1, i2, theta0, k0 = force1.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict1[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Angles0 - Angles1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Angles1 - Angles0 = %s\" % (keys1.difference(keys0)))\n diff_keys = keys0.symmetric_difference(keys1)\n assert diff_keys == set(), \"Systems have different HarmonicAngleForce entries: extra keys are: \\n%s\" % diff_keys\n\n for k, parameter_name in enumerate([\"theta0\", \"k0\"]):\n for (i0, i1, i2) in dict0.keys():\n val0 = dict0[i0, i1, i2][k]\n val1 = dict1[i0, i1, i2][k]\n if parameter_name=='theta0':\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has angle values of %f and %f degrees, respectively.\" % (i0, i1, i2, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has force constant values of %f and %f kJ/(mol degree**2), respectively.\" % (i0, i1, i2, val0, val1)",
"def isSecondOblateAxis(alpha1, alpha2, beta1, beta2, maxDist, maxTorsAngle): \n a1 = np.asarray(alpha1)\n a2 = np.asarray(alpha2)\n b1 = np.asarray(beta1)\n b2 = np.asarray(beta2)\n #lent = alpha1 - beta1\n adir = a2 - a1\n bdir = b2 - b1\n aLength = np.sqrt ( np.dot(adir, adir) )\n bLength = np.sqrt ( np.dot(bdir, bdir) )\n DotProdNormed = np.dot(adir, bdir) / ( aLength * bLength ) \n maxTors = np.cos( np.radians( maxTorsAngle ))\n if (abs(DotProdNormed) > maxTors):\n # print beta1, beta2, \"not rectangular, angle = \", np.arccos(DotProdNormed)\n return False\n # print beta1, beta2, \"is rectangular.\" \n # find nearest point to alpha mid on the potential beta axis by bisection\n # midAlpha = [a2 + 0.5 * dAlph for a2, dAlph in zip(alpha2, dirAlpha)]\n axisDist = minimalDistance(a1, a2, b1, b2)\n# print \"Distance of\", a1, \"<->\", a2, \" to \", b1, \"<->\", b2, \"is\", axisDist\n #midBeta = [b2 + 0.5 * dBeta for b2, dBeta in zip(beta2, dirBeta)]\n if axisDist < maxDist:\n # print b1, \"<->\", b2, \"is possible axis\"\n return True\n else:\n # print b1, \"<->\", b2, \"is too far (\", axisDist ,\") from\", a1, \"<->\", a2, \", maximal allowed distance =\", maxDist\n return False",
"def test_axis_angle_from_compact_axis_angle():\n ca = [0.0, 0.0, 0.0]\n a = pr.axis_angle_from_compact_axis_angle(ca)\n assert_array_almost_equal(a, np.array([1.0, 0.0, 0.0, 0.0]))\n\n random_state = np.random.RandomState(1)\n for _ in range(5):\n ca = pr.random_compact_axis_angle(random_state)\n a = pr.axis_angle_from_compact_axis_angle(ca)\n assert_almost_equal(np.linalg.norm(ca), a[3])\n assert_array_almost_equal(ca[:3] / np.linalg.norm(ca), a[:3])",
"def test_compact_axis_angle():\n a = np.array([1.0, 0.0, 0.0, 0.0])\n ca = pr.compact_axis_angle(a)\n assert_array_almost_equal(ca, np.zeros(3))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n ca = pr.compact_axis_angle(a)\n assert_array_almost_equal(pr.norm_vector(ca), a[:3])\n assert_almost_equal(np.linalg.norm(ca), a[3])",
"def testCorrectForTwoAtomCellWithoutPeriodicityNEEDED(self):\n\t\texpDist = 0.01*10\n\t\tself._checkExpMatchesActual(expDist)",
"def test_comp_magnetostrictive_tensor_1cell():\n\n # Physical quantities\n dim = 2\n Nt_tot = 1\n\n mu = 1\n Be = np.array([[[mu / 2, 0]]])\n He = np.array([[[-1 / 2, 0]]])\n mue = np.array([[mu]])\n\n Me = np.reshape(Be / mue - He, (dim, 1, Nt_tot))\n\n alphaij = [[1, 0, 0], [1, 0, 0]]\n\n alpha1 = 1\n alpha2 = 1\n\n # Computation\n tensor = ForceTensor()\n\n tensor_comp = tensor.comp_magnetrosctrictive_tensor(\n mue, Me, Nt_tot, alphaij\n ) # Should be equal to -alpha1*mu*MM' - alpha2*mu*M²*I2\n\n assert tensor_comp[0, 0, 0] == -mu * (alpha1 + alpha2)\n assert tensor_comp[0, 1, 0] == 0\n assert tensor_comp[1, 0, 0] == 0\n assert tensor_comp[1, 1, 0] == -mu * alpha2\n\n print(\"test_comp_magnetostrictive_tensor succeeded\")\n\n return True",
"def _validate_mesh(self):\n if not (np.abs(self.axis_u.dot(self.axis_v) < 1e-6) and #pylint: disable=no-member\n np.abs(self.axis_v.dot(self.axis_w) < 1e-6) and #pylint: disable=no-member\n np.abs(self.axis_w.dot(self.axis_u) < 1e-6)): #pylint: disable=no-member\n raise ValueError('axis_u, axis_v, and axis_w must be orthogonal')\n return True"
]
| [
"0.60290766",
"0.6012614",
"0.58263516",
"0.5755218",
"0.56348616",
"0.56247103",
"0.5583029",
"0.5559526",
"0.55149376",
"0.54994637",
"0.5498538",
"0.54675615",
"0.5454319",
"0.5441296",
"0.54299664",
"0.5414825",
"0.54081666",
"0.53792536",
"0.53736025",
"0.53621024",
"0.53515375",
"0.53283983",
"0.5313582",
"0.52788335",
"0.52713114",
"0.52639693",
"0.52491915",
"0.5234739",
"0.5207955",
"0.52076775"
]
| 0.6782517 | 0 |
Set the RGB value, and optionally brightness, of a single pixel. If you don't supply a brightness value, the last value will be kept. | def set_pixel(x, r, g, b, brightness=None):
if brightness is None:
brightness = pixels[x][3]
else:
brightness = int(float(MAX_BRIGHTNESS) * brightness) & 0b11111
pixels[x] = [int(r) & 0xff, int(g) & 0xff, int(b) & 0xff, brightness] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_pixel(self, frame, index, brightness):\n if brightness > 255 or brightness < 0:\n raise ValueError('Value {} out of range. Brightness must be between 0 and 255'.format(brightness))\n\n if index < 0 or index > 143:\n raise ValueError('Index must be between 0 and 143')\n\n self._buf[frame][index] = brightness",
"def set_rgb(self, value):\n act = RGBAction(self, value)\n return act.invoke()",
"def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)",
"def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value",
"def Set(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUS_Set(self, *args)",
"def set_blue(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_blue(newval)",
"def Set(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelF_Set(self, *args)",
"def rgb(self, value):\n\n self._variable = value\n self._update()",
"def set(self, pixel, r, g, b):\n\t\tself.buffer[pixel][0] = self.gamma[g]\n\t\tself.buffer[pixel][1] = self.gamma[r]\n\t\tself.buffer[pixel][2] = self.gamma[b]\n\t\t\n\t\tself.update()",
"def set_pixel(self, pixel, red, green, blue):\n self.blinkt_iface.WriteValue([0x07, 0x02, 0x00,\n pixel, red, green, blue], ())",
"def setPixel(self, x, y, r, g, b):\n self.array[x, y, 0] = (r)\n\tself.array[x, y, 1] = (g)\n\tself.array[x, y, 2] = (b)\n #QD & DT 4.2.15\n\n #_tkExec(self.image.put, \"{%s}\"%color_rgb(r,g,b), (x, y))",
"def setPixel(self, x, y, val):\r\n self.__buffer[y][x].setValue(val)",
"def set_green(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_green(newval)",
"def set_param(self, name, val):\n # name will be 'colorR', 'colorG', 'colorB'\n rgb255 = int(val * 255)\n if name == 'colorR':\n self.color.r = rgb255\n elif name == 'colorG':\n self.color.g = rgb255\n elif name == 'colorB':\n self.color.b = rgb255",
"def set_rgb(self, r, g, b):\n self.r = r\n self.g = g\n self.b = b",
"def set_rgb(self, r, g, b):\n self.r = r\n self.g = g\n self.b = b",
"def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 7:\n # Ignore out of bounds pixels.\n return\n\n self.set_led(y * 16 + ((x + 7) % 8), value)",
"def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n self.set_led( y * 16 + x, value)\n else:\n self.set_led((y-8) * 16 + (x+8), value)",
"def setByRGB(self, rgb: tuple):\n pass",
"def set_pixel(self, led_num, red, green, blue, bright_percent=100):\r\n if led_num < 0:\r\n return # Pixel is invisible, so ignore\r\n if led_num >= self.num_led:\r\n return # again, invisible\r\n\r\n # Calculate pixel brightness as a percentage of the\r\n # defined global_brightness. Round up to nearest integer\r\n # as we expect some brightness unless set to 0\r\n brightness = ceil(bright_percent * self.global_brightness / 100.0)\r\n brightness = int(brightness)\r\n\r\n # LED startframe is three \"1\" bits, followed by 5 brightness bits\r\n ledstart = (brightness & 0b00011111) | self.LED_START\r\n\r\n start_index = 4 * led_num\r\n self.leds[start_index] = ledstart\r\n\r\n self.leds[start_index + 1] = red\r\n self.leds[start_index + 2] = green\r\n self.leds[start_index + 3] = blue",
"def set_pixel(self, framebuf, x, y, color):\n index = (y * framebuf.stride + x) * 2\n framebuf.buf[index : index + 2] = self.color_to_rgb565(color)",
"def setRgb ( self, r, g = 0.0, b = 0.0 ):\n self.setRgba( r, g, b )",
"def set_pixel(self, x, y, value):\r\n \r\n # Rotation and mirroring\r\n a = x\r\n x = y\r\n y = 7-a\r\n \r\n # From the baseclass\r\n if x < 0 or x > 7 or y < 0 or y > 7:\r\n # Ignore out of bounds pixels.\r\n return\r\n # Set green LED based on 1st bit in value.\r\n self.set_led(y * 16 + x, 1 if value & Display.COLOR_GREEN > 0 else 0)\r\n # Set red LED based on 2nd bit in value.\r\n self.set_led(y * 16 + x + 8, 1 if value & Display.COLOR_RED > 0 else 0)",
"async def pixy_set_brightness(self, brightness):\n data = [PrivateConstants.PIXY_SET_BRIGHTNESS, brightness & 0x7f,\n (brightness >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.PIXY_CONFIG, data)",
"async def pixy_set_brightness(self, brightness):\n data = [PrivateConstants.PIXY_SET_BRIGHTNESS, brightness & 0x7f, (brightness >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.PIXY_CONFIG, data)",
"def set_blue(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2] = value",
"def SetRGB(*args, **kwargs):\n return _gdi_.Colour_SetRGB(*args, **kwargs)",
"def set_pixel(self, x, y, v):\n self.buf[y][x] = v & 0x07",
"def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) // 8\n offset = 7 - x & 0x07\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )",
"def set_green(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3+ 1] = value"
]
| [
"0.7257801",
"0.70964503",
"0.7078613",
"0.69361275",
"0.69103754",
"0.6849395",
"0.68239653",
"0.6787617",
"0.67320323",
"0.670798",
"0.6697311",
"0.6621259",
"0.65958184",
"0.6595451",
"0.6569934",
"0.6569934",
"0.6561587",
"0.6554602",
"0.653843",
"0.6494485",
"0.6490676",
"0.6482191",
"0.6471573",
"0.6467549",
"0.64650625",
"0.64648193",
"0.6440539",
"0.6412788",
"0.6403556",
"0.6403416"
]
| 0.7723842 | 0 |
Waits for wait_seconds seconds before setting stop_signal. | def sleep_and_set_stop_signal_task(stop_signal, wait_seconds):
timer = Timer(wait_seconds, stop_signal.set)
timer.daemon = True
timer.start() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait(self, seconds):\n time.sleep(seconds)",
"def wait_for_seconds(self, seconds, sleeptime=0.001):\n self.listen_until_return(timeout=seconds, sleeptime=sleeptime)",
"def wait (self, seconds=0.0):\r\n\t\tstart_time = time.time()\r\n\t\twhile time.time() < start_time + seconds:\r\n\t\t\tself.receive()",
"def wait(self, seconds):\n logging.info(\"sleeping\")\n self.new_message_event.wait(seconds)\n logging.info(\"waking\")",
"def cancel_wait(self):\n self.lib.CancelWait()",
"def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass",
"def wait_second(self, time_wait):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Waiting {time_wait}s',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.wait_in_second(time_wait)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")",
"def wait_stop(self):\n\n if not self.is_stopped():\n self.__thread.join()",
"def wait(wait_time):\n\n time.sleep(wait_time)",
"def wait(self, seconds):\n self.driver.implicitly_wait(seconds)",
"def __wait(min_sec, max_sec):\n time.sleep(randint(min_sec, max_sec))",
"def busy_wait(self, seconds):\n end_time = time.perf_counter() + seconds\n while(time.perf_counter() < end_time):\n pass",
"def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return",
"def sleep(self, seconds):\n\n # We schedule an alarm signal for x=seconds out in the future.\n # noinspection PyUnusedLocal\n def handle_alarm(signal_num, frame):\n pass\n\n signal.signal(signal.SIGALRM, handle_alarm)\n signal.alarm(seconds)\n\n # Wait for either the alarm to go off or for us to receive a SIGINT.\n signal.pause()\n\n # Remove the alarm if it is still pending.\n signal.alarm(0)",
"def wait(self, signal):\n while True:\n s = self.receive()\n if s == signal:\n break",
"def sleep(self, seconds):\n ten_ms_steps = int(round(seconds * 100))\n for _i in xrange(0,ten_ms_steps):\n if self._sequence_stop_signal:\n break\n sleep(0.01)",
"def user_wait(self, duration):\n self.enqueue(lambda t: sleep(duration + int(PY3)))",
"def wait(self, options):\n self.socketIO.wait(seconds=options)",
"def wait_inner():\n if (\n kernel32.WaitForMultipleObjects(\n 2,\n ctypes.pointer((HANDLE * 2)(cancel_event, timer)),\n False,\n INFINITE,\n )\n == WAIT_FAILED\n ):\n time_sleep(sleep_for)",
"def wait(self, sleep_time):\n time.sleep(sleep_time)",
"def wait(self, timeoout=None, state=\"C-completed\"):",
"def wait(self):\n try:\n self.relay.wait()\n self.responder.wait()\n except KeyboardInterrupt:\n print_notification(\"Stopping\")\n finally:\n self.terminate_processes()",
"def implicitly_wait(self, secs):\n self.base_driver.implicitly_wait(secs)",
"async def sleep(self, seconds):\n await self._sleep_until_nanos(_get_future_nanos(seconds))",
"def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()",
"def wait(delay=2):\n time.sleep(delay)",
"def wait(self, ms=None):\r\n util.raiseNotDefined()",
"def _sleep(self, sleep_time: float = 10) -> None:\n sleep_until_interrupt(sleep_time, lambda: self.stopped, interval=0.5)",
"def sleep(seconds):\r\n time.sleep(seconds)",
"async def _wait_for_stop_event(self):\n await self._ws_stop_event.wait()"
]
| [
"0.70341736",
"0.69638157",
"0.6919327",
"0.69169587",
"0.6484378",
"0.64468855",
"0.63162977",
"0.6311919",
"0.6281869",
"0.62770647",
"0.62602526",
"0.62214833",
"0.62033594",
"0.6187091",
"0.6172887",
"0.6160744",
"0.61564934",
"0.61425143",
"0.6129257",
"0.6078051",
"0.6040032",
"0.60173327",
"0.60112953",
"0.60070574",
"0.59990895",
"0.5981164",
"0.5976189",
"0.5964133",
"0.595756",
"0.59445775"
]
| 0.74198914 | 0 |
Duplicate one-to-one fields. | def __duplicate_o2o_fields(self, duplicate):
for f in self._meta.related_objects:
if f.one_to_one:
if any(
[
f.name in self._clone_o2o_fields
and f not in self._meta.concrete_fields,
self._clone_excluded_o2o_fields
and f.name not in self._clone_excluded_o2o_fields
and f not in self._meta.concrete_fields,
]
):
rel_object = getattr(self, f.name, None)
if rel_object:
new_rel_object = CloneMixin._create_copy_of_instance(
rel_object,
force=True,
sub_clone=True,
)
setattr(new_rel_object, f.remote_field.name, duplicate)
new_rel_object.save()
return duplicate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __duplicate_m2o_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.concrete_fields:\n if f.many_to_one:\n if any(\n [\n f.name in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.name not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to one fields\n for field in fields:\n item = getattr(self, field.name)\n try:\n item_clone = item.make_clone()\n except IntegrityError:\n item_clone = item.make_clone(sub_clone=True)\n\n setattr(duplicate, field.name, item_clone)\n\n return duplicate",
"def __duplicate_o2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.related_objects:\n if f.one_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone one to many fields\n for field in fields:\n for item in getattr(self, field.get_accessor_name()).all():\n try:\n item.make_clone(attrs={field.remote_field.name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field.remote_field.name: duplicate}, sub_clone=True\n )\n\n return duplicate",
"def prepare_duplication(self):\n for field in self.fields:\n ofield = self.fields[field]\n\n if self.duplicate:\n if ofield.primary_key:\n self.exclude_field(field)\n continue\n\n if not self.auto_fields:\n # add others if needed\n if hasattr(ofield, 'auto_now') or \\\n hasattr(ofield, 'auto_now_add'):\n if ofield.auto_now or ofield.auto_now_add:\n self.exclude_field(field)\n continue",
"def make_fields_unique(self, fields):\n ...",
"def duplicateSettings(self, otherField):\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog",
"def make_clone(self, attrs=None, sub_clone=False):\n attrs = attrs or {}\n if not self.pk:\n raise ValidationError(\n \"{}: Instance must be saved before it can be cloned.\".format(\n self.__class__.__name__\n )\n )\n if sub_clone:\n duplicate = self\n duplicate.pk = None\n else:\n duplicate = self._create_copy_of_instance(self)\n\n for name, value in attrs.items():\n setattr(duplicate, name, value)\n\n duplicate.save()\n\n duplicate = self.__duplicate_o2o_fields(duplicate)\n duplicate = self.__duplicate_o2m_fields(duplicate)\n duplicate = self.__duplicate_m2o_fields(duplicate)\n duplicate = self.__duplicate_m2m_fields(duplicate)\n return duplicate",
"def __duplicate_m2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.many_to_many:\n if any(\n [\n f.name in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.name not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n for f in self._meta.related_objects:\n if f.many_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to many fields\n for field in fields:\n if hasattr(field, \"field\"):\n # ManyToManyRel\n field_name = field.field.m2m_reverse_field_name()\n through = field.through\n source = getattr(self, field.get_accessor_name())\n destination = getattr(duplicate, field.get_accessor_name())\n else:\n through = field.remote_field.through\n field_name = field.m2m_field_name()\n source = getattr(self, field.attname)\n destination = getattr(duplicate, field.attname)\n if all(\n [\n through,\n not through._meta.auto_created,\n ]\n ):\n objs = through.objects.filter(**{field_name: self.pk})\n for item in objs:\n if hasattr(through, \"make_clone\"):\n try:\n item.make_clone(attrs={field_name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field_name: duplicate}, sub_clone=True\n )\n else:\n item.pk = None\n setattr(item, field_name, duplicate)\n item.save()\n else:\n destination.set(source.all())\n\n return duplicate",
"def clone_setup(self, setup_id):\n setup = Setup.objects.get(id=setup_id)\n setup.pk = None # copy all fields except the primary key (id)\n setup.date = datetime.now()\n\n i = 1\n\n while len(Setup.objects.filter(name=setup.name + \" (\" + str(i) + \")\")) != 0:\n i = i + 1\n\n setup.name += \" (\" + str(i) + \")\"\n\n if setup.status == \"final\":\n setup.status = \"draft\"\n setup.save()\n\n new_subspaces = Subspace.objects.filter(setup_id=setup_id)\n\n for subspace in new_subspaces:\n subspace.setup_id = setup\n subspace.pk = None\n subspace.save()\n\n return Setup.objects.get(pk=setup.pk)",
"def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))",
"def duplicate(self, duplicate):\n\n self._duplicate = duplicate",
"def _populate(self, fields):\n schema = self.schema\n for k, v in fields.items():\n fields[k] = schema.fields[k].iget(self, v)\n\n self.modify(fields)\n self.reset_modified()",
"def process_duplicate_rows(self):\n pass",
"def clone(self):",
"def _add_fields(self, fields):\n for field in fields:\n self.add(field)",
"def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new",
"def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)",
"def duplicate(self):\r\n duplicate = Profile()\r\n \r\n for i in self.__dict__:\r\n if type(getattr(self, i)) is dict:\r\n setattr(duplicate, i, getattr(self, i).copy())\r\n else:\r\n setattr(duplicate, i, getattr(self, i))\r\n\r\n return duplicate",
"def test__ActivityParty__copy_with__1():\n old_party_id = 'plain'\n old_size = 6\n old_max = 12\n new_party_id = 'asia'\n new_size = 1\n new_max = 8\n \n field = ActivityParty(\n party_id = old_party_id,\n size = old_size,\n max_ = old_max,\n )\n copy = field.copy_with(\n party_id = new_party_id,\n size = new_size,\n max_ = new_max,\n )\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(copy.id, new_party_id)\n vampytest.assert_eq(copy.size, new_size)\n vampytest.assert_eq(copy.max, new_max)",
"def copy(self):",
"def case_duplicate(item):\n\n data = item.data\n case_number = data.get(\"case_number\")\n person_id = data.get(\"person_id\")\n\n table = item.table\n if case_number:\n query = (table.case_number == case_number) & \\\n (table.deleted != True)\n else:\n disease_id = data.get(\"disease_id\")\n if person_id and disease_id:\n query = (table.disease_id == disease_id) & \\\n (table.person_id == person_id) & \\\n (table.deleted != True)\n else:\n return\n\n duplicate = current.db(query).select(table.id,\n table.person_id,\n limitby=(0, 1)).first()\n if duplicate:\n item.data.person_id = duplicate.person_id\n item.id = duplicate.id\n item.method = item.METHOD.UPDATE",
"def _propagate_duplicate_cols(self, duplicate_cols):\n for duplicate in duplicate_cols:\n no_suffix = \"_\".join(duplicate.split(\"_\")[:-1])\n null_idx = self._hybrid_meta[no_suffix].isnull()\n non_null_vals = self._hybrid_meta.loc[null_idx, duplicate].values\n self._hybrid_meta.loc[null_idx, no_suffix] = non_null_vals",
"def create(self, **kwargs):\n reverse_one_to_one_fields = frozenset(kwargs).intersection(\n self.model._meta._reverse_one_to_one_field_names\n )\n if reverse_one_to_one_fields:\n raise ValueError(\n \"The following fields do not exist in this model: %s\"\n % \", \".join(reverse_one_to_one_fields)\n )\n\n obj = self.model(**kwargs)\n self._for_write = True\n obj.save(force_insert=True, using=self.db)\n return obj",
"def _create_copy_of_instance(instance, force=False, sub_clone=False):\n cls = instance.__class__\n clone_fields = getattr(cls, \"_clone_fields\", CloneMixin._clone_fields)\n clone_excluded_fields = getattr(\n cls, \"_clone_excluded_fields\", CloneMixin._clone_excluded_fields\n )\n clone_o2o_fields = getattr(\n cls, \"_clone_o2o_fields\", CloneMixin._clone_o2o_fields\n )\n clone_excluded_o2o_fields = getattr(\n cls, \"_clone_excluded_o2o_fields\", CloneMixin._clone_excluded_o2o_fields\n )\n unique_duplicate_suffix = getattr(\n cls, \"UNIQUE_DUPLICATE_SUFFIX\", CloneMixin.UNIQUE_DUPLICATE_SUFFIX\n )\n use_unique_duplicate_suffix = getattr(\n cls,\n \"USE_UNIQUE_DUPLICATE_SUFFIX\",\n CloneMixin.USE_UNIQUE_DUPLICATE_SUFFIX,\n )\n max_unique_duplicate_query_attempts = getattr(\n cls,\n \"MAX_UNIQUE_DUPLICATE_QUERY_ATTEMPTS\",\n CloneMixin.MAX_UNIQUE_DUPLICATE_QUERY_ATTEMPTS,\n )\n\n fields, unique_fields = get_fields_and_unique_fields_from_cls(\n cls=cls,\n force=force,\n clone_fields=clone_fields,\n clone_excluded_fields=clone_excluded_fields,\n clone_o2o_fields=clone_o2o_fields,\n clone_excluded_o2o_fields=clone_excluded_o2o_fields,\n )\n\n new_instance = cls()\n\n for f in fields:\n value = getattr(instance, f.attname, f.get_default())\n\n if isinstance(f, (models.DateTimeField, models.DateField)):\n if f.auto_now or f.auto_now_add:\n f.pre_save(new_instance, add=True)\n continue\n\n if all(\n [\n not f.auto_created,\n f.concrete,\n f.editable,\n f.name in unique_fields,\n ]\n ):\n # Do not try to get unique value for enum type field\n if (\n isinstance(f, (models.CharField, models.TextField))\n and not f.choices\n ):\n value = clean_value(value, unique_duplicate_suffix)\n if use_unique_duplicate_suffix:\n value = get_unique_value(\n obj=instance,\n fname=f.attname,\n value=value,\n transform=(slugify if isinstance(f, SlugField) else str),\n suffix=unique_duplicate_suffix,\n max_length=f.max_length,\n max_attempts=max_unique_duplicate_query_attempts,\n )\n\n elif isinstance(f, models.OneToOneField) and not sub_clone:\n sub_instance = getattr(instance, f.name, f.get_default())\n\n if sub_instance is not None:\n sub_instance = CloneMixin._create_copy_of_instance(\n sub_instance,\n force=True,\n sub_clone=True,\n )\n sub_instance.save()\n value = sub_instance.pk\n\n setattr(new_instance, f.attname, value)\n\n return new_instance",
"def cdup (self):\r\n pass",
"def dupe(q_1: Q) -> Q:\n\n du = Q(\n [q_1.t, q_1.x, q_1.y, q_1.z],\n q_type=q_1.q_type,\n representation=q_1.representation,\n )\n return du",
"def copyDataField(self, mle, name): \n if name not in self.__examples: \n raise ValueError(\"Field does not exist: \" + name) \n \n mle.addDataField(name, self.getDataField(name)) \n return mle",
"def clone(old):\n new_kwargs = dict([(fld.name, getattr(old, fld.name))\n for fld in old._meta.fields\n if not isinstance(fld, JeevesForeignKey)])\n ans = old.__class__(**new_kwargs)\n for fld in old._meta.fields:\n if isinstance(fld, JeevesForeignKey):\n setattr(ans, fld.attname, getattr(old, fld.attname))\n return ans",
"def duplicate(self,newName):\n raise AbstractError",
"def duplicate(self):\n\n return Note(self.nbr, self.length, self.vel)",
"def post_single(self, obj):\n\t\timport MySQLdb\n\t\tobj_copy = {}\n\t\t\n\t\tcolumns = self.db.columns(obj['type'])\n\t\t# copy valid columns\n\t\tfor c in columns:\n\t\t\tif obj.get(c):\n\t\t\t\tobj_copy[c] = obj.get(c)\n\n\t\tparts = {\n\t\t\t'tab': obj['type'],\n\t\t\t'cmd': self.post_action(obj)\n\t\t}\n\n\t\tif parts['cmd'] in ('insert', 'replace'):\n\t\t\tparts['cols'] = '`, `'.join(obj_copy.keys())\n\t\t\tparts['vals'] = ('%s,'*len(obj_copy))[:-1]\n\t\t\tquery = \"\"\"%(cmd)s into `%(tab)s`(`%(cols)s`) \n\t\t\t\tvalues (%(vals)s)\"\"\" % parts\n\t\telse:\n\t\t\tparts['set'] = ', '.join(['`%s`=%s' % (key, '%s') for key in obj_copy.keys()])\n\t\t\tparts['name'] = obj['name'].replace(\"'\", \"\\'\")\n\t\t\tquery = \"\"\"update `%(tab)s` set %(set)s where name='%(name)s'\"\"\" % parts\n\t\t\n\t\tself.db.sql(query, tuple(obj_copy.values()))"
]
| [
"0.6707461",
"0.6674422",
"0.65932596",
"0.6507167",
"0.5882289",
"0.5850159",
"0.5838307",
"0.57841384",
"0.5691724",
"0.55239975",
"0.54606026",
"0.5400994",
"0.5379366",
"0.53540444",
"0.53528345",
"0.5347377",
"0.53058666",
"0.53031313",
"0.5300691",
"0.5285195",
"0.5277633",
"0.52439535",
"0.5225573",
"0.521947",
"0.5204585",
"0.52038634",
"0.52021176",
"0.51979935",
"0.5197257",
"0.518608"
]
| 0.6830732 | 0 |
Duplicate one-to-many fields. | def __duplicate_o2m_fields(self, duplicate):
fields = set()
for f in self._meta.related_objects:
if f.one_to_many:
if any(
[
f.get_accessor_name() in self._clone_m2o_or_o2m_fields,
self._clone_excluded_m2o_or_o2m_fields
and f.get_accessor_name()
not in self._clone_excluded_m2o_or_o2m_fields,
]
):
fields.add(f)
# Clone one to many fields
for field in fields:
for item in getattr(self, field.get_accessor_name()).all():
try:
item.make_clone(attrs={field.remote_field.name: duplicate})
except IntegrityError:
item.make_clone(
attrs={field.remote_field.name: duplicate}, sub_clone=True
)
return duplicate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __duplicate_m2o_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.concrete_fields:\n if f.many_to_one:\n if any(\n [\n f.name in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.name not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to one fields\n for field in fields:\n item = getattr(self, field.name)\n try:\n item_clone = item.make_clone()\n except IntegrityError:\n item_clone = item.make_clone(sub_clone=True)\n\n setattr(duplicate, field.name, item_clone)\n\n return duplicate",
"def __duplicate_m2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.many_to_many:\n if any(\n [\n f.name in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.name not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n for f in self._meta.related_objects:\n if f.many_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to many fields\n for field in fields:\n if hasattr(field, \"field\"):\n # ManyToManyRel\n field_name = field.field.m2m_reverse_field_name()\n through = field.through\n source = getattr(self, field.get_accessor_name())\n destination = getattr(duplicate, field.get_accessor_name())\n else:\n through = field.remote_field.through\n field_name = field.m2m_field_name()\n source = getattr(self, field.attname)\n destination = getattr(duplicate, field.attname)\n if all(\n [\n through,\n not through._meta.auto_created,\n ]\n ):\n objs = through.objects.filter(**{field_name: self.pk})\n for item in objs:\n if hasattr(through, \"make_clone\"):\n try:\n item.make_clone(attrs={field_name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field_name: duplicate}, sub_clone=True\n )\n else:\n item.pk = None\n setattr(item, field_name, duplicate)\n item.save()\n else:\n destination.set(source.all())\n\n return duplicate",
"def __duplicate_o2o_fields(self, duplicate):\n for f in self._meta.related_objects:\n if f.one_to_one:\n if any(\n [\n f.name in self._clone_o2o_fields\n and f not in self._meta.concrete_fields,\n self._clone_excluded_o2o_fields\n and f.name not in self._clone_excluded_o2o_fields\n and f not in self._meta.concrete_fields,\n ]\n ):\n rel_object = getattr(self, f.name, None)\n if rel_object:\n new_rel_object = CloneMixin._create_copy_of_instance(\n rel_object,\n force=True,\n sub_clone=True,\n )\n setattr(new_rel_object, f.remote_field.name, duplicate)\n new_rel_object.save()\n\n return duplicate",
"def prepare_duplication(self):\n for field in self.fields:\n ofield = self.fields[field]\n\n if self.duplicate:\n if ofield.primary_key:\n self.exclude_field(field)\n continue\n\n if not self.auto_fields:\n # add others if needed\n if hasattr(ofield, 'auto_now') or \\\n hasattr(ofield, 'auto_now_add'):\n if ofield.auto_now or ofield.auto_now_add:\n self.exclude_field(field)\n continue",
"def make_fields_unique(self, fields):\n ...",
"def _add_fields(self, fields):\n for field in fields:\n self.add(field)",
"def make_clone(self, attrs=None, sub_clone=False):\n attrs = attrs or {}\n if not self.pk:\n raise ValidationError(\n \"{}: Instance must be saved before it can be cloned.\".format(\n self.__class__.__name__\n )\n )\n if sub_clone:\n duplicate = self\n duplicate.pk = None\n else:\n duplicate = self._create_copy_of_instance(self)\n\n for name, value in attrs.items():\n setattr(duplicate, name, value)\n\n duplicate.save()\n\n duplicate = self.__duplicate_o2o_fields(duplicate)\n duplicate = self.__duplicate_o2m_fields(duplicate)\n duplicate = self.__duplicate_m2o_fields(duplicate)\n duplicate = self.__duplicate_m2m_fields(duplicate)\n return duplicate",
"def save(self, *args, **kwargs):\n super(ColumnMapping, self).save(*args, **kwargs)\n # Because we need to have saved our ColumnMapping in order to have M2M,\n # We must create it before we prune older references.\n self.remove_duplicates(self.column_raw.all())",
"def add(cls, obj1, obj2):\n if not cls._meta.many_to_many:\n raise Exception(\"ERROR: Add called on non many to many model\")\n\n query = AddQuery(cls, obj1, obj2)\n yield query.execute()\n\n if not getattr(obj1, obj2._meta.name):\n setattr(obj1, obj2._meta.name, [obj2])\n else:\n getattr(obj1, obj2._meta.name).append(obj2)\n\n if not getattr(obj2, obj1._meta.name):\n setattr(obj2, obj1._meta.name, [obj1])\n else:\n getattr(obj2, obj1._meta.name).append(obj1)",
"def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))",
"def clone(old):\n new_kwargs = dict([(fld.name, getattr(old, fld.name))\n for fld in old._meta.fields\n if not isinstance(fld, JeevesForeignKey)])\n ans = old.__class__(**new_kwargs)\n for fld in old._meta.fields:\n if isinstance(fld, JeevesForeignKey):\n setattr(ans, fld.attname, getattr(old, fld.attname))\n return ans",
"def copy_relations(self, oldinstance):\n for image in oldinstance.images.all():\n image.pk = None\n image.gallery = self\n image.save()",
"def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)",
"def clone_setup(self, setup_id):\n setup = Setup.objects.get(id=setup_id)\n setup.pk = None # copy all fields except the primary key (id)\n setup.date = datetime.now()\n\n i = 1\n\n while len(Setup.objects.filter(name=setup.name + \" (\" + str(i) + \")\")) != 0:\n i = i + 1\n\n setup.name += \" (\" + str(i) + \")\"\n\n if setup.status == \"final\":\n setup.status = \"draft\"\n setup.save()\n\n new_subspaces = Subspace.objects.filter(setup_id=setup_id)\n\n for subspace in new_subspaces:\n subspace.setup_id = setup\n subspace.pk = None\n subspace.save()\n\n return Setup.objects.get(pk=setup.pk)",
"def duplicate(self, contributor, inherit_collection=False):\n return bulk_duplicate(\n entities=self,\n contributor=contributor,\n inherit_collection=inherit_collection,\n )",
"def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())",
"def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields",
"def auto_populate(self, model):\n for name, val in self._fields.iteritems():\n setattr(model, name, val.data)",
"def _populate(self, fields):\n schema = self.schema\n for k, v in fields.items():\n fields[k] = schema.fields[k].iget(self, v)\n\n self.modify(fields)\n self.reset_modified()",
"def process_duplicate_rows(self):\n pass",
"def copy(self):\n return self.__class__(*self.sets)",
"def test_insert_many_to_many():\n\n model1 = get_fake_model({\"name\": models.TextField(primary_key=True)})\n\n model2 = get_fake_model(\n {\n \"name\": models.TextField(primary_key=True),\n \"model1s\": models.ManyToManyField(model1),\n }\n )\n\n row2 = model2.objects.on_conflict(\n [\"name\"], ConflictAction.UPDATE\n ).insert_and_get(name=\"swen\")\n\n row1 = model1.objects.create(name=\"booh\")\n\n row2.model1s.add(row1)\n row2.save()",
"def duplicateSettings(self, otherField):\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog",
"def test_deepcopy(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n t.transform([2])\n copy.deepcopy(t)",
"def add_fields(self, fields):\n for label, data in fields.items():\n self[label] = data",
"def _clone_rip(self, memo):\n # references lists of definitions need to be vacated except those that were cloned.\n for definition in self._definitions:\n new_references = set()\n for ref in definition._references:\n if ref in memo.values():\n new_references.add(ref)\n for instance in definition._children:\n instance._reference._references.add(instance)\n\n definition._references = new_references",
"def duplicate_invoice(invoice):\n from invoicer.models import Invoice\n from invoicer.models import LineItem\n\n # copy main attributes\n new_invoice = Invoice(\n company=invoice.company,\n invoice_date=datetime.now(),\n client=invoice.client,\n location=invoice.location,\n tax_rate=invoice.tax_rate,\n left_address=invoice.left_address,\n right_address=invoice.right_address,\n terms=invoice.terms,\n footer=invoice.footer\n )\n new_invoice.save()\n\n # now line items\n for line_item in invoice.line_items.all():\n new_invoice.line_items.add(LineItem(\n name=line_item.name,\n description=line_item.description,\n price=line_item.price,\n taxable=line_item.taxable,\n item=line_item.item,\n quantity=line_item.quantity\n ))\n\n return new_invoice",
"def _save_direct_relations(self, kwargs):\n for field_name, field in self.fields.items():\n if field.read_only:\n continue\n if isinstance(self._validated_data, dict) and self._validated_data.get(field.source) is None:\n continue\n if not isinstance(field, serializers.BaseSerializer):\n continue\n if hasattr(self, 'Meta') and hasattr(self.Meta, 'model'):\n # ModelSerializer (or similar) so we need to exclude reverse relations\n try:\n _, direct = self._get_related_field(field)\n except FieldDoesNotExist:\n continue\n if not direct:\n continue\n\n # reinject validated_data\n field._validated_data = self._validated_data[field_name]\n self._validated_data[field_name] = field.save(**kwargs.pop(field_name, {}))",
"def duplicate(self):\r\n duplicate = Profile()\r\n \r\n for i in self.__dict__:\r\n if type(getattr(self, i)) is dict:\r\n setattr(duplicate, i, getattr(self, i).copy())\r\n else:\r\n setattr(duplicate, i, getattr(self, i))\r\n\r\n return duplicate",
"def add_field(self, field_name):\n field = utils.get_field(field_name, self.model)\n if utils.is_one(field):\n self._one_fields.add(field_name)\n else:\n self._many_fields.add(field_name)"
]
| [
"0.75518346",
"0.716638",
"0.7029186",
"0.6424519",
"0.615692",
"0.5902748",
"0.580661",
"0.57859975",
"0.5785186",
"0.5717907",
"0.5689",
"0.56443495",
"0.56256235",
"0.55974805",
"0.55747044",
"0.5503478",
"0.5490743",
"0.5467568",
"0.53609717",
"0.5352907",
"0.53502095",
"0.5310417",
"0.53027606",
"0.52807564",
"0.5278822",
"0.527058",
"0.5261532",
"0.52391",
"0.5237488",
"0.5222178"
]
| 0.74103355 | 1 |
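The __duplicate_o2m_fields record above walks each reverse one-to-many relation and re-creates every child row against the freshly cloned parent. The framework-free sketch below shows just that shape of copy; Author/Book and their attributes are stand-ins rather than real Django models, so it runs without a database.

import copy

class Author:
    def __init__(self, name):
        self.name = name
        self.books = []              # "one-to-many": one author, many books

class Book:
    def __init__(self, title, author):
        self.title = title
        self.author = author
        author.books.append(self)

original = Author("Ada")
Book("Notes", original)
Book("Letters", original)

clone = copy.copy(original)          # shallow copy of the parent (the "duplicate")
clone.books = []                     # the clone starts with no children of its own
for book in original.books:          # the loop __duplicate_o2m_fields automates:
    Book(book.title, clone)          # re-create each child pointing at the clone

print(len(original.books), len(clone.books))   # 2 2
print(clone.books[0] is original.books[0])     # False -- children were copied, not shared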
Duplicate many to one fields. | def __duplicate_m2o_fields(self, duplicate):
fields = set()
for f in self._meta.concrete_fields:
if f.many_to_one:
if any(
[
f.name in self._clone_m2o_or_o2m_fields,
self._clone_excluded_m2o_or_o2m_fields
and f.name not in self._clone_excluded_m2o_or_o2m_fields,
]
):
fields.add(f)
# Clone many to one fields
for field in fields:
item = getattr(self, field.name)
try:
item_clone = item.make_clone()
except IntegrityError:
item_clone = item.make_clone(sub_clone=True)
setattr(duplicate, field.name, item_clone)
return duplicate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __duplicate_o2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.related_objects:\n if f.one_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone one to many fields\n for field in fields:\n for item in getattr(self, field.get_accessor_name()).all():\n try:\n item.make_clone(attrs={field.remote_field.name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field.remote_field.name: duplicate}, sub_clone=True\n )\n\n return duplicate",
"def __duplicate_m2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.many_to_many:\n if any(\n [\n f.name in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.name not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n for f in self._meta.related_objects:\n if f.many_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to many fields\n for field in fields:\n if hasattr(field, \"field\"):\n # ManyToManyRel\n field_name = field.field.m2m_reverse_field_name()\n through = field.through\n source = getattr(self, field.get_accessor_name())\n destination = getattr(duplicate, field.get_accessor_name())\n else:\n through = field.remote_field.through\n field_name = field.m2m_field_name()\n source = getattr(self, field.attname)\n destination = getattr(duplicate, field.attname)\n if all(\n [\n through,\n not through._meta.auto_created,\n ]\n ):\n objs = through.objects.filter(**{field_name: self.pk})\n for item in objs:\n if hasattr(through, \"make_clone\"):\n try:\n item.make_clone(attrs={field_name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field_name: duplicate}, sub_clone=True\n )\n else:\n item.pk = None\n setattr(item, field_name, duplicate)\n item.save()\n else:\n destination.set(source.all())\n\n return duplicate",
"def __duplicate_o2o_fields(self, duplicate):\n for f in self._meta.related_objects:\n if f.one_to_one:\n if any(\n [\n f.name in self._clone_o2o_fields\n and f not in self._meta.concrete_fields,\n self._clone_excluded_o2o_fields\n and f.name not in self._clone_excluded_o2o_fields\n and f not in self._meta.concrete_fields,\n ]\n ):\n rel_object = getattr(self, f.name, None)\n if rel_object:\n new_rel_object = CloneMixin._create_copy_of_instance(\n rel_object,\n force=True,\n sub_clone=True,\n )\n setattr(new_rel_object, f.remote_field.name, duplicate)\n new_rel_object.save()\n\n return duplicate",
"def prepare_duplication(self):\n for field in self.fields:\n ofield = self.fields[field]\n\n if self.duplicate:\n if ofield.primary_key:\n self.exclude_field(field)\n continue\n\n if not self.auto_fields:\n # add others if needed\n if hasattr(ofield, 'auto_now') or \\\n hasattr(ofield, 'auto_now_add'):\n if ofield.auto_now or ofield.auto_now_add:\n self.exclude_field(field)\n continue",
"def make_fields_unique(self, fields):\n ...",
"def save(self, *args, **kwargs):\n super(ColumnMapping, self).save(*args, **kwargs)\n # Because we need to have saved our ColumnMapping in order to have M2M,\n # We must create it before we prune older references.\n self.remove_duplicates(self.column_raw.all())",
"def make_clone(self, attrs=None, sub_clone=False):\n attrs = attrs or {}\n if not self.pk:\n raise ValidationError(\n \"{}: Instance must be saved before it can be cloned.\".format(\n self.__class__.__name__\n )\n )\n if sub_clone:\n duplicate = self\n duplicate.pk = None\n else:\n duplicate = self._create_copy_of_instance(self)\n\n for name, value in attrs.items():\n setattr(duplicate, name, value)\n\n duplicate.save()\n\n duplicate = self.__duplicate_o2o_fields(duplicate)\n duplicate = self.__duplicate_o2m_fields(duplicate)\n duplicate = self.__duplicate_m2o_fields(duplicate)\n duplicate = self.__duplicate_m2m_fields(duplicate)\n return duplicate",
"def add(cls, obj1, obj2):\n if not cls._meta.many_to_many:\n raise Exception(\"ERROR: Add called on non many to many model\")\n\n query = AddQuery(cls, obj1, obj2)\n yield query.execute()\n\n if not getattr(obj1, obj2._meta.name):\n setattr(obj1, obj2._meta.name, [obj2])\n else:\n getattr(obj1, obj2._meta.name).append(obj2)\n\n if not getattr(obj2, obj1._meta.name):\n setattr(obj2, obj1._meta.name, [obj1])\n else:\n getattr(obj2, obj1._meta.name).append(obj1)",
"def test_insert_many_to_many():\n\n model1 = get_fake_model({\"name\": models.TextField(primary_key=True)})\n\n model2 = get_fake_model(\n {\n \"name\": models.TextField(primary_key=True),\n \"model1s\": models.ManyToManyField(model1),\n }\n )\n\n row2 = model2.objects.on_conflict(\n [\"name\"], ConflictAction.UPDATE\n ).insert_and_get(name=\"swen\")\n\n row1 = model1.objects.create(name=\"booh\")\n\n row2.model1s.add(row1)\n row2.save()",
"def duplicate(self, contributor, inherit_collection=False):\n return bulk_duplicate(\n entities=self,\n contributor=contributor,\n inherit_collection=inherit_collection,\n )",
"def copy_relations(self, oldinstance):\n for image in oldinstance.images.all():\n image.pk = None\n image.gallery = self\n image.save()",
"def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())",
"def clone_setup(self, setup_id):\n setup = Setup.objects.get(id=setup_id)\n setup.pk = None # copy all fields except the primary key (id)\n setup.date = datetime.now()\n\n i = 1\n\n while len(Setup.objects.filter(name=setup.name + \" (\" + str(i) + \")\")) != 0:\n i = i + 1\n\n setup.name += \" (\" + str(i) + \")\"\n\n if setup.status == \"final\":\n setup.status = \"draft\"\n setup.save()\n\n new_subspaces = Subspace.objects.filter(setup_id=setup_id)\n\n for subspace in new_subspaces:\n subspace.setup_id = setup\n subspace.pk = None\n subspace.save()\n\n return Setup.objects.get(pk=setup.pk)",
"def process_duplicate_rows(self):\n pass",
"def clone(old):\n new_kwargs = dict([(fld.name, getattr(old, fld.name))\n for fld in old._meta.fields\n if not isinstance(fld, JeevesForeignKey)])\n ans = old.__class__(**new_kwargs)\n for fld in old._meta.fields:\n if isinstance(fld, JeevesForeignKey):\n setattr(ans, fld.attname, getattr(old, fld.attname))\n return ans",
"def duplicate(self, contributor, inherit_collection=False):\n return bulk_duplicate(\n entities=self._meta.model.objects.filter(pk=self.pk),\n contributor=contributor,\n inherit_collection=inherit_collection,\n )[0]",
"def _add_fields(self, fields):\n for field in fields:\n self.add(field)",
"def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)",
"def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))",
"def merge_into(self, target):\n ModelField = Pool().get('ir.model.field')\n\n model_fields = ModelField.search([\n ('relation', '=', self.__name__),\n ('ttype', '=', 'many2one'),\n ])\n\n if hasattr(self, 'active'):\n self.active = False\n self.merged_into = target\n self.save()\n\n cursor = Transaction().connection.cursor()\n\n to_validate = []\n for field in model_fields:\n Model = Pool().get(field.model.model)\n\n if isinstance(getattr(Model, field.name), fields.Function):\n continue\n\n if not hasattr(Model, '__table__'):\n continue\n\n sql_table = Model.__table__()\n # Discard sql.Union or others generated by table_query()\n if not isinstance(sql_table, Table):\n continue\n\n to_validate.append(field)\n sql_field = getattr(sql_table, field.name)\n cursor.execute(*sql_table.update(\n columns=[sql_field],\n values=[target.id],\n where=(sql_field == self.id)\n ))\n\n # Validate all related records and target.\n # Do it at the very end because we may # temporarily leave\n # information inconsistent in the previous loop\n for field in model_fields:\n Model = Pool().get(field.model.model)\n\n if not isinstance(Model, ModelSQL):\n continue\n\n ff = getattr(Model, field.name)\n if isinstance(ff, fields.Function) and not ff.searcher:\n continue\n\n with Transaction().set_context(active_test=False):\n Model.validate(Model.search([\n (field.name, '=', target.id),\n ]))\n\n self.validate([target])",
"def duplicate(self,newName):\n raise AbstractError",
"def test_deepcopy(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n t.transform([2])\n copy.deepcopy(t)",
"async def insert_many(self, models):\n\n pass",
"def _save_direct_relations(self, kwargs):\n for field_name, field in self.fields.items():\n if field.read_only:\n continue\n if isinstance(self._validated_data, dict) and self._validated_data.get(field.source) is None:\n continue\n if not isinstance(field, serializers.BaseSerializer):\n continue\n if hasattr(self, 'Meta') and hasattr(self.Meta, 'model'):\n # ModelSerializer (or similar) so we need to exclude reverse relations\n try:\n _, direct = self._get_related_field(field)\n except FieldDoesNotExist:\n continue\n if not direct:\n continue\n\n # reinject validated_data\n field._validated_data = self._validated_data[field_name]\n self._validated_data[field_name] = field.save(**kwargs.pop(field_name, {}))",
"def save_related(self, request, form, formsets, change):\n pass",
"def Clone(self, ids=[], default=None):\n default = {}\n exitValues = {} \n \n for tmpObject in self.browse(getListIDs(ids)):\n note={\n 'type': 'clone object',\n 'reason': \"Creating new cloned entity starting from '{old}'.\".format(old=tmpObject.name),\n }\n self._insertlog(tmpObject.id, note=note)\n newID = self.copy(tmpObject.id, default)\n if newID:\n newEnt = self.browse(newID)\n exitValues = {\n '_id': newID,\n 'name': newEnt.name,\n 'engineering_code': newEnt.engineering_code,\n 'engineering_revision': newEnt.engineering_revision,\n 'engineering_writable': True,\n 'state': 'draft',\n }\n break\n return packDictionary(exitValues)",
"def member_deduplicate(item):\n\n if item.tablename == \"member_membership\":\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.member_membership\n\n data = item.data\n\n person_id = data.person_id\n organisation_id = data.organisation_id\n\n # 1 Membership record per Person<>Organisation\n query = (mtable.deleted != True) & \\\n (mtable.person_id == person_id) & \\\n (mtable.organisation_id == organisation_id)\n row = db(query).select(mtable.id,\n limitby=(0, 1)).first()\n if row:\n item.id = row.id\n item.method = item.METHOD.UPDATE",
"def many_to_one(input_dict):\n return dict((key, val)\n for keys, val in input_dict.items()\n for key in keys)",
"def auto_populate(self, model):\n for name, val in self._fields.iteritems():\n setattr(model, name, val.data)",
"def duplicate_slide(source, target, index):\n source_slide = source.slides[index]\n blank_slide_layout = _get_blank_slide_layout(target)\n dest = target.slides.add_slide(blank_slide_layout)\n\n for shape in source_slide.shapes:\n newel = deepcopy(shape.element)\n dest.shapes._spTree.insert_element_before(newel, 'p:extLst')"
]
| [
"0.73184913",
"0.717969",
"0.69729894",
"0.62483776",
"0.6107792",
"0.58588886",
"0.58578765",
"0.58333975",
"0.57033503",
"0.5657808",
"0.5648944",
"0.563969",
"0.54837763",
"0.54219484",
"0.5397397",
"0.5392138",
"0.5375547",
"0.5354627",
"0.5337015",
"0.5312537",
"0.52772385",
"0.5261812",
"0.52244574",
"0.5206605",
"0.5192923",
"0.51926553",
"0.5190502",
"0.5181809",
"0.5175779",
"0.517045"
]
| 0.7469325 | 0 |
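For the many-to-one branch above the direction is reversed: the object a foreign key points at is itself cloned, and the duplicate's attribute is re-pointed to that fresh copy. A small framework-free sketch, with Publisher/Book as illustrative stand-ins:

import copy

class Publisher:
    def __init__(self, name):
        self.name = name
    def make_clone(self):
        return copy.copy(self)

class Book:
    def __init__(self, title, publisher):
        self.title = title
        self.publisher = publisher   # "many-to-one": many books, one publisher

original = Book("Dune", Publisher("Chilton"))
duplicate = copy.copy(original)
duplicate.publisher = original.publisher.make_clone()   # what __duplicate_m2o_fields automates

print(duplicate.publisher is original.publisher)         # False -- the FK target was cloned too
print(duplicate.publisher.name)                          # Chilton

Note that in the record above this only happens for fields listed in _clone_m2o_or_o2m_fields (or not excluded); a plain foreign key copy would otherwise keep pointing at the same related row.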
Duplicate many to many fields. | def __duplicate_m2m_fields(self, duplicate):
fields = set()
for f in self._meta.many_to_many:
if any(
[
f.name in self._clone_m2m_fields,
self._clone_excluded_m2m_fields
and f.name not in self._clone_excluded_m2m_fields,
]
):
fields.add(f)
for f in self._meta.related_objects:
if f.many_to_many:
if any(
[
f.get_accessor_name() in self._clone_m2m_fields,
self._clone_excluded_m2m_fields
and f.get_accessor_name()
not in self._clone_excluded_m2m_fields,
]
):
fields.add(f)
# Clone many to many fields
for field in fields:
if hasattr(field, "field"):
# ManyToManyRel
field_name = field.field.m2m_reverse_field_name()
through = field.through
source = getattr(self, field.get_accessor_name())
destination = getattr(duplicate, field.get_accessor_name())
else:
through = field.remote_field.through
field_name = field.m2m_field_name()
source = getattr(self, field.attname)
destination = getattr(duplicate, field.attname)
if all(
[
through,
not through._meta.auto_created,
]
):
objs = through.objects.filter(**{field_name: self.pk})
for item in objs:
if hasattr(through, "make_clone"):
try:
item.make_clone(attrs={field_name: duplicate})
except IntegrityError:
item.make_clone(
attrs={field_name: duplicate}, sub_clone=True
)
else:
item.pk = None
setattr(item, field_name, duplicate)
item.save()
else:
destination.set(source.all())
return duplicate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __duplicate_m2o_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.concrete_fields:\n if f.many_to_one:\n if any(\n [\n f.name in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.name not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to one fields\n for field in fields:\n item = getattr(self, field.name)\n try:\n item_clone = item.make_clone()\n except IntegrityError:\n item_clone = item.make_clone(sub_clone=True)\n\n setattr(duplicate, field.name, item_clone)\n\n return duplicate",
"def __duplicate_o2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.related_objects:\n if f.one_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone one to many fields\n for field in fields:\n for item in getattr(self, field.get_accessor_name()).all():\n try:\n item.make_clone(attrs={field.remote_field.name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field.remote_field.name: duplicate}, sub_clone=True\n )\n\n return duplicate",
"def add(cls, obj1, obj2):\n if not cls._meta.many_to_many:\n raise Exception(\"ERROR: Add called on non many to many model\")\n\n query = AddQuery(cls, obj1, obj2)\n yield query.execute()\n\n if not getattr(obj1, obj2._meta.name):\n setattr(obj1, obj2._meta.name, [obj2])\n else:\n getattr(obj1, obj2._meta.name).append(obj2)\n\n if not getattr(obj2, obj1._meta.name):\n setattr(obj2, obj1._meta.name, [obj1])\n else:\n getattr(obj2, obj1._meta.name).append(obj1)",
"def test_insert_many_to_many():\n\n model1 = get_fake_model({\"name\": models.TextField(primary_key=True)})\n\n model2 = get_fake_model(\n {\n \"name\": models.TextField(primary_key=True),\n \"model1s\": models.ManyToManyField(model1),\n }\n )\n\n row2 = model2.objects.on_conflict(\n [\"name\"], ConflictAction.UPDATE\n ).insert_and_get(name=\"swen\")\n\n row1 = model1.objects.create(name=\"booh\")\n\n row2.model1s.add(row1)\n row2.save()",
"def __duplicate_o2o_fields(self, duplicate):\n for f in self._meta.related_objects:\n if f.one_to_one:\n if any(\n [\n f.name in self._clone_o2o_fields\n and f not in self._meta.concrete_fields,\n self._clone_excluded_o2o_fields\n and f.name not in self._clone_excluded_o2o_fields\n and f not in self._meta.concrete_fields,\n ]\n ):\n rel_object = getattr(self, f.name, None)\n if rel_object:\n new_rel_object = CloneMixin._create_copy_of_instance(\n rel_object,\n force=True,\n sub_clone=True,\n )\n setattr(new_rel_object, f.remote_field.name, duplicate)\n new_rel_object.save()\n\n return duplicate",
"async def insert_many(self, models):\n\n pass",
"def update_m2m_fields(self, fieldname, mapping):\n _typemsg = \"Mapping must be a dictionary, with keys valued on the primary keys of instances in the queryset, \" \\\n \"valued on a list of primary keys of instances in the related object queryset\"\n if not isinstance(mapping, dict):\n raise TypeError(_typemsg)\n\n for _, value in mapping.items():\n if not isinstance(value, list):\n raise TypeError(_typemsg)\n\n field = self.model._meta.get_field(fieldname)\n if not isinstance(field, models.ManyToManyField):\n raise TypeError('Field must be a many-to-many type')\n\n field_instance = getattr(self.model, fieldname, None)\n if not field_instance:\n raise ValueError('Field not found')\n\n ThroughModel = field_instance.through\n through_model_fields = ThroughModel._meta.get_fields()\n\n # align which field goes with which through model field\n\n mapping_key_fieldname = ''\n mapping_value_fieldname = ''\n\n for f in through_model_fields:\n if isinstance(f, models.ForeignKey) and f.target_field.model == self.model:\n mapping_key_fieldname = f.attname\n\n elif isinstance(f, models.ForeignKey):\n mapping_value_fieldname = f.attname\n\n\n # delete existing m2m relationships for the provided keys\n key_ids = [i for i in mapping.keys()]\n ThroughModel.objects.filter(**{\n mapping_key_fieldname + '__in': key_ids\n }).delete()\n\n ls = []\n for key, values in mapping.items():\n for value in values:\n ls.append(ThroughModel(**{\n mapping_key_fieldname: key,\n mapping_value_fieldname: value\n }))\n\n ThroughModel.objects.bulk_create(ls)",
"def save_object(self, obj, **kwargs):\n obj._complex_m2m_data={};\n if getattr(obj, '_m2m_data', None):\n for relatedObject in obj._meta.get_all_related_many_to_many_objects():\n if (relatedObject.field.rel.through._meta.auto_created):\n # These are non-trough ManyToMany relations and\n # can be updated just fine\n continue\n fieldName = relatedObject.get_accessor_name()\n if fieldName in obj._m2m_data.keys():\n obj._complex_m2m_data[fieldName] = (relatedObject, obj._m2m_data[fieldName])\n del obj._m2m_data[fieldName]\n\n serializers.ModelSerializer.save_object(self, obj, **kwargs);\n\n for (accessor, stuff) in obj._complex_m2m_data.items():\n (relatedObject, data) = stuff\n through = relatedObject.field.rel.through\n local_fieldName = relatedObject.field.m2m_reverse_field_name()\n remote_fieldName = relatedObject.field.m2m_field_name()\n\n # get the current set of existing relations\n existing = through.objects.filter(**{local_fieldName: obj});\n\n data_ids = [item.id for item in data]\n existing_ids = [getattr(item,remote_fieldName).id for item in existing]\n\n #print \"data_ids\", data_ids\n #print \"existing_ids\", existing_ids\n\n # remove relations that are in 'existing' but not in 'data'\n for item in list(existing):\n if (getattr(item,remote_fieldName).id not in data_ids):\n print \"delete\", getattr(item,remote_fieldName)\n item.delete() #(purge=True)\n\n # add relations that are in 'data' but not in 'existing'\n for item in data:\n if (item.id not in existing_ids):\n #print \"add\", item\n newModel = through(**{local_fieldName: obj, remote_fieldName: item})\n newModel.save()",
"def save(self, *args, **kwargs):\n super(ColumnMapping, self).save(*args, **kwargs)\n # Because we need to have saved our ColumnMapping in order to have M2M,\n # We must create it before we prune older references.\n self.remove_duplicates(self.column_raw.all())",
"def _object_update(self, obj, items):\n # many to many fields are saved after the main object\n m2ms = {}\n for key, value in items.iteritems():\n try:\n field = obj._meta.get_field(key)\n if isinstance(field, ManyToManyField):\n m2ms[key] = value\n else:\n setattr(obj, key, value)\n\n except FieldDoesNotExist:\n raise InvalidParameter(key)\n\n try:\n obj.full_clean()\n obj.save()\n except ValidationError as e:\n raise InvalidParameter(e.message_dict, override=True)\n\n for key, values in m2ms.iteritems():\n manager = getattr(obj, key)\n manager.clear()\n manager.add(*values)",
"def copy_relations(self, oldinstance):\n for image in oldinstance.images.all():\n image.pk = None\n image.gallery = self\n image.save()",
"def test_many_to_many_mapping_cache_with_add(self):\n new_cars = CarFactory.create_batch(size=3)\n initial_count = len(Driver.objects.get(id=self.driver.id).cars.all())\n self.driver.cars.add(*new_cars)\n reset_queries()\n\n # Cache for both models should be invalidated as add is an m2m change\n new_count = len(Driver.objects.get(id=self.driver.id).cars.all())\n self.assertEqual(len(connection.queries), 2)\n self.assertEqual(initial_count + 3, new_count)",
"def _add_fields(self, fields):\n for field in fields:\n self.add(field)",
"def add_many_descriptors(self, descriptors):",
"def prepare_duplication(self):\n for field in self.fields:\n ofield = self.fields[field]\n\n if self.duplicate:\n if ofield.primary_key:\n self.exclude_field(field)\n continue\n\n if not self.auto_fields:\n # add others if needed\n if hasattr(ofield, 'auto_now') or \\\n hasattr(ofield, 'auto_now_add'):\n if ofield.auto_now or ofield.auto_now_add:\n self.exclude_field(field)\n continue",
"def set_many_db_object(instances, **fields):\n if isinstance(instances, QuerySet):\n instances.update(**fields)\n elif isinstance(instances, (list, tuple, set)):\n for instance in instances:\n set_one_db_object(instance, **fields)",
"def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)",
"def populate(self):\n insert_many_on_conflict_ignore(self.settings, Player, self.rows)",
"def make_fields_unique(self, fields):\n ...",
"def copy_taggeditems(apps, schema_editor):\n TaggitTaggedItem = apps.get_model('taggit', 'TaggedItem')\n ExtrasTaggedItem = apps.get_model('extras', 'TaggedItem')\n\n tagged_items_values = TaggitTaggedItem.objects.all().values('id', 'object_id', 'content_type_id', 'tag_id')\n tagged_items = [ExtrasTaggedItem(**tagged_item) for tagged_item in tagged_items_values]\n ExtrasTaggedItem.objects.bulk_create(tagged_items)",
"def _save_reverse_relations(self, related_objects, instance):\n for field, related_field, data, kwargs in related_objects:\n # inject the PK from the instance\n if isinstance(field, serializers.ListSerializer):\n for obj in data:\n obj[related_field.name] = instance\n elif isinstance(field, serializers.ModelSerializer):\n data[related_field.name] = instance\n else:\n raise Exception(\"unexpected serializer type\")\n\n # reinject validated_data\n field._validated_data = data\n field.save(**kwargs)",
"def set_many(self, mapping, timeout=None):\n values = [self._get_doc(key, value, timeout) for key, value in mapping.iteritems()]\n self.collection.insert_many(values)\n return True",
"def copy_tags(apps, schema_editor):\n TaggitTag = apps.get_model('taggit', 'Tag')\n ExtrasTag = apps.get_model('extras', 'Tag')\n\n tags_values = TaggitTag.objects.all().values('id', 'name', 'slug')\n tags = [ExtrasTag(**tag) for tag in tags_values]\n ExtrasTag.objects.bulk_create(tags)",
"def _update_many_to_many(self, table, parent_id_name, child_id_name, parent_id_val, new_list, old_list=None):\n\n session = object_session(self)\n\n old_set = {x for x in old_list} if old_list else set()\n new_set = {x for x in new_list} if new_list else set()\n\n # Update many-to-many relations\n # Remove old relations and apply the new ones\n if old_set != new_set:\n to_add = new_set - old_set\n to_del = old_set - new_set\n\n if to_del:\n session.execute(\n table.delete().where(\n and_(table.c[parent_id_name] == parent_id_val, table.c[child_id_name].in_(to_del))\n )\n )\n if to_add:\n session.execute(\n table.insert().values([{parent_id_name: parent_id_val, child_id_name: my_id} for my_id in to_add])\n )",
"def test_many_to_many_with_intermediate(self):\n artist = Artist.objects.create(name=\"Great singer\")\n group = Group.objects.create(name=\"Cool band\")\n\n # can't use group.members.add() with intermediate model\n membership = Membership.objects.create(\n artist=artist,\n group=group,\n invite_reason=\"Need a new drummer\"\n )\n\n # group members visible now\n self.assertEqual(group.members.count(), 1)\n\n # soft-delete intermediate instance\n # so link should be invisible\n membership.delete()\n self.assertEqual(Membership.objects.deleted_only().count(), 1)\n\n self.assertEqual(group.members.count(), 0)\n self.assertEqual(artist.group_set.count(), 0)",
"def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())",
"def upsert(self, data, parameters=None, many_to_many_clear: bool = True):\n many_to_many_fields = self.get_many_to_many_fields()\n many_to_many_fields_store = {}\n\n # Skip if no identifier is set\n if (not self.unique_identifier and not self.unique_identifiers) or not data:\n return None\n\n try:\n if isinstance(parameters, dict):\n data.update(parameters)\n\n identity = self.set_identifiers(data)\n\n # Escape many to many fields from object and store to add later on\n if many_to_many_fields:\n for field in many_to_many_fields:\n evaluated = data.pop(field, None)\n many_to_many_fields_store[field] = evaluated\n\n # Update or create object\n model, created = self.model.objects.update_or_create(**identity, defaults=data)\n\n # Attach many to many objects after save\n if model and many_to_many_fields_store:\n for field, objects in many_to_many_fields_store.items():\n element = getattr(model, field, None)\n if many_to_many_clear:\n element.clear()\n if objects:\n element.add(*objects)\n\n # Finally return model\n return model, created\n\n except DataError as e:\n raise ValidationError(e)",
"def _save_direct_relations(self, kwargs):\n for field_name, field in self.fields.items():\n if field.read_only:\n continue\n if isinstance(self._validated_data, dict) and self._validated_data.get(field.source) is None:\n continue\n if not isinstance(field, serializers.BaseSerializer):\n continue\n if hasattr(self, 'Meta') and hasattr(self.Meta, 'model'):\n # ModelSerializer (or similar) so we need to exclude reverse relations\n try:\n _, direct = self._get_related_field(field)\n except FieldDoesNotExist:\n continue\n if not direct:\n continue\n\n # reinject validated_data\n field._validated_data = self._validated_data[field_name]\n self._validated_data[field_name] = field.save(**kwargs.pop(field_name, {}))",
"def related_update(self, name, items, field='id'):\n\n\t\t# Grab this model's content type\n\t\tcontent_type = ContentType.objects.get_for_model(type(self))\n\n\t\tfor group, model, extra, title in self.get_related_models():\n\t\t\tif name == group:\n\t\t\t\tfrom .models import Related\n\t\t\t\targs = {\n\t\t\t\t\t'group': name,\n\t\t\t\t\t'content_type': content_type,\n\t\t\t\t\t'object_id': self.id,\n\t\t\t\t\t'related_content_type': ContentType.objects.get_for_model(model),\n\t\t\t\t}\n\n\t\t\t\t# Remove current group relations\n\t\t\t\tRelated.objects.filter(**args).delete()\n\n\t\t\t\t# Add new relations\n\t\t\t\tfor i, item in enumerate(items):\n\t\t\t\t\ttry:\n\t\t\t\t\t\titem_id = item if field == 'id' else model.objects.get(**{field: item}).id\n\t\t\t\t\t\targs.update({\n\t\t\t\t\t\t\t'sort': i,\n\t\t\t\t\t\t\t'related_object_id': item_id,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tRelated.objects.create(**args)\n\t\t\t\t\texcept model.DoesNotExist:\n\t\t\t\t\t\tpass",
"def save_related(self, request, form, formsets, change):\n pass"
]
| [
"0.71822715",
"0.6938308",
"0.6353612",
"0.618208",
"0.6114017",
"0.6109971",
"0.5827082",
"0.5766371",
"0.5740473",
"0.57385474",
"0.56653893",
"0.5664595",
"0.56420845",
"0.55645937",
"0.55632085",
"0.5549489",
"0.547048",
"0.54265815",
"0.53681564",
"0.5348527",
"0.5340041",
"0.5308211",
"0.5304325",
"0.5301199",
"0.5285995",
"0.5282501",
"0.52488184",
"0.5246661",
"0.5241424",
"0.52356106"
]
| 0.7390873 | 0 |
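The many-to-many record above has two paths: link rows on a custom through model are cloned one by one, while an auto-created through table is handled with destination.set(source.all()). The single-file Django sketch below exercises that simpler second path end to end against an in-memory SQLite database; Tag/Article and the settings boilerplate are assumptions for the example, not part of the original library.

import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=["__main__"],
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
    DEFAULT_AUTO_FIELD="django.db.models.AutoField",
)
django.setup()

from django.db import connection, models

class Tag(models.Model):
    name = models.CharField(max_length=30)

class Article(models.Model):
    title = models.CharField(max_length=60)
    tags = models.ManyToManyField(Tag)

with connection.schema_editor() as editor:
    editor.create_model(Tag)
    editor.create_model(Article)          # also creates the auto-created through table

article = Article.objects.create(title="original")
article.tags.add(Tag.objects.create(name="django"), Tag.objects.create(name="clone"))

duplicate = Article.objects.get(pk=article.pk)
duplicate.pk = None
duplicate.save()                          # inserts a new row with the same field values
duplicate.tags.set(article.tags.all())    # the destination.set(source.all()) branch above

print(duplicate.pk != article.pk, duplicate.tags.count())   # True 2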
Sets the business_account of this UserResponse. | def business_account(self, business_account):
self._business_account = business_account | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def business_id(self, business_id):\n\n self._business_id = business_id",
"def business_email(self, business_email):\n\n self._business_email = business_email",
"def business_owner(self, business_owner):\n\n self._business_owner = business_owner",
"def business_phone(self, business_phone):\n\n self._business_phone = business_phone",
"def business_model(self, business_model):\n\n self._business_model = business_model",
"def is_business(self, is_business):\n\n self._is_business = is_business",
"def business_phone_number(self, business_phone_number):\n\n self._business_phone_number = business_phone_number",
"def account_balance(self, account_balance):\n\n self._account_balance = account_balance",
"def account_balance(self, account_balance):\n\n self._account_balance = account_balance",
"def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email",
"def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def set_audit_account(self, audit_account):\n self.single_selection_from_kendo_dropdown(self.statement_entry_audit_account_locator, audit_account)\n self.wait_for_ajax_spinner_load()",
"def step_impl_the_ru_is_set_to(context, business_id):\n context.bdd_helper.message_data[\"business_id\"] = business_id",
"def set_account(self, account: str):\n ret = self._call_txtrader_api('set_account', {'account': account})\n if ret:\n self.account = account\n return ret",
"def account_bank_id(self, account_bank_id):\n\n self._account_bank_id = account_bank_id",
"def account_status(self, account_status):\n\n self._account_status = account_status",
"def set_account(self):\n return self.__Account",
"def service_account(self, service_account):\n\n self._service_account = service_account",
"def account(self, account: str):\n self._account = account",
"def business_home_page(self, business_home_page: str):\n self._business_home_page = business_home_page",
"def account_amount(self, account_amount):\n\n self._account_amount = account_amount",
"def individual_account(self, individual_account):\n\n self._individual_account = individual_account",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id"
]
| [
"0.68295044",
"0.6694788",
"0.65968657",
"0.64488673",
"0.63704747",
"0.61454445",
"0.60893625",
"0.58178234",
"0.58178234",
"0.5807157",
"0.5681304",
"0.5420851",
"0.5420851",
"0.5420851",
"0.5420851",
"0.53558195",
"0.5355273",
"0.53436404",
"0.5324513",
"0.52895993",
"0.52246135",
"0.517185",
"0.5164289",
"0.5159948",
"0.51538277",
"0.51505893",
"0.50991464",
"0.50991464",
"0.50991464",
"0.50991464"
]
| 0.8394445 | 0 |
Sets the individual_account of this UserResponse. | def individual_account(self, individual_account):
self._individual_account = individual_account | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id",
"def set_account(self, account: str):\n ret = self._call_txtrader_api('set_account', {'account': account})\n if ret:\n self.account = account\n return ret",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account_number(self, account_number):\n\n self._account_number = account_number",
"def account(self, account: str):\n self._account = account",
"def account_status(self, account_status):\n\n self._account_status = account_status",
"def setaccount(self, vergeaddress, account):\n return self.proxy.setaccount(vergeaddress, account)",
"def account_id(self, account_id):\n self._account_id = account_id",
"def account_amount(self, account_amount):\n\n self._account_amount = account_amount",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def business_account(self, business_account):\n\n self._business_account = business_account",
"def master_account(self, master_account):\n\n self._master_account = master_account",
"def account_holder(self, account_holder):\n\n self._account_holder = account_holder",
"def set_account(self):\n return self.__Account",
"def account_name(self, account_name):\n\n self._account_name = account_name",
"def account_name(self, account_name):\n\n self._account_name = account_name",
"def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()",
"def account_balance(self, account_balance):\n\n self._account_balance = account_balance",
"def account_balance(self, account_balance):\n\n self._account_balance = account_balance",
"def principal(self, principal):\n\n self._principal = principal",
"def set_individual(\n self, index: int, individual: Individual, diff_nodes: List[int]\n ) -> None:\n old_individual = self._individuals[index]\n self._individuals[index] = individual\n self._update_correlation(index, old_individual, diff_nodes)\n self._update_metrics(index, old_individual)",
"def account_type(self, account_type):\n\n self._account_type = account_type"
]
| [
"0.6242222",
"0.62353134",
"0.6195874",
"0.6195874",
"0.6195874",
"0.6195874",
"0.6064232",
"0.6054456",
"0.599",
"0.5941063",
"0.59252465",
"0.58991206",
"0.5867289",
"0.5867289",
"0.5867289",
"0.5867289",
"0.5867289",
"0.5867289",
"0.58530086",
"0.56883174",
"0.5645553",
"0.56303823",
"0.55309767",
"0.55309767",
"0.5410755",
"0.539998",
"0.539998",
"0.52434486",
"0.52277505",
"0.5172749"
]
| 0.80935436 | 0 |
Sets the registration_marketplace_id of this UserResponse. | def registration_marketplace_id(self, registration_marketplace_id):
self._registration_marketplace_id = registration_marketplace_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def marketplace_id(self, marketplace_id):\n\n self._marketplace_id = marketplace_id",
"def put(self, **kwargs):\n contract = {\n \"pushRegKey\": [\"id\",\"+\"]\n }\n try:\n self.check_params_conform(contract)\n except ValidatorException:\n return\n\n user_id = kwargs[\"id\"]\n user = models.User.get_by_id(user_id)\n if user is None:\n self.abort(422, \"Could not find user\")\n\n user.pushRegKey = self.get_param(\"pushRegKey\")\n user.put()\n\n self.set_default_success_response()\n self.send_response()",
"def save(self, *args, **kwargs):\n self.registration_response_key = self.registration_response_key or None\n return super().save(*args, **kwargs)",
"def registration_id(self, registration_id):\n\n self._registration_id = registration_id",
"def set_regID():\n if not request.json or not 'regID' in request.json or not 'id' in request.json:\n abort(400)\n\n regID = request.json.get('regID', \"\")\n id = request.json['id']\n\n users = models.User.query.all()\n for u in users:\n if u.regid == str(regID):\n u.regid = 0\n\n user = models.User.query.get(id)\n if user:\n user.regid = str(regID)\n db.session.commit()\n return jsonify({'user': 'updated'}), 200\n\n return jsonify({'user': 'not found'}), 401",
"def reg_id(self, reg_id):\n\n self._reg_id = reg_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def id_user(self, id_user):\n\n self._id_user = id_user",
"def set_AWSMarketplaceId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMarketplaceId', value)",
"def set_user_register(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_user_register(self, *args, **kwargs)",
"def set_google_id(self, google_id):\n self._google_id = google_id",
"def registered_face_id(self, registered_face_id):\n\n self._registered_face_id = registered_face_id",
"def register_user(self):\n response = self.client.post(self.register_url, self.register_data, format='json')\n return response",
"def response_id(self, response_id):\n\n self._response_id = response_id",
"def user_id(self, user_id):\n # type: (string_types) -> None\n\n if user_id is not None:\n if not isinstance(user_id, string_types):\n raise TypeError(\"Invalid type for `user_id`, type has to be `string_types`\")\n\n self._user_id = user_id",
"def registration_tid(self, registration_tid):\n\n self._registration_tid = registration_tid",
"def user(self, user):\n self.user_id = user.get_id()"
]
| [
"0.65472084",
"0.563777",
"0.5593157",
"0.55648047",
"0.5374765",
"0.52975535",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.52847886",
"0.5221391",
"0.49729037",
"0.4949675",
"0.489465",
"0.4886806",
"0.4855748",
"0.48248115",
"0.48133096",
"0.4813174",
"0.47778004"
]
| 0.7918989 | 0 |
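The three UserResponse records above (business_account, individual_account, registration_marketplace_id) all follow the property-plus-private-attribute pattern that OpenAPI/Swagger code generators emit. A stripped-down sketch of that pattern, with the class body and the sample value invented purely for illustration:

class UserResponse:
    def __init__(self):
        self._registration_marketplace_id = None   # generated clients keep one private slot per field

    @property
    def registration_marketplace_id(self):
        return self._registration_marketplace_id

    @registration_marketplace_id.setter
    def registration_marketplace_id(self, registration_marketplace_id):
        self._registration_marketplace_id = registration_marketplace_id

resp = UserResponse()
resp.registration_marketplace_id = "example-marketplace-id"   # illustrative value only
print(resp.registration_marketplace_id)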
Convert a pygame surface into string | def surface_to_string( surface ):
return pygame.image.tostring( surface, 'RGB' ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pygame_to_cvimage( surface ):\n cv_image = cv.CreateImageHeader( surface.get_size(), cv.IPL_DEPTH_8U, 3 )\n image_string = surface_to_string( surface )\n cv.SetData( cv_image, image_string )\n return cv_image",
"def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1",
"def getSurface(self, name):\n name = name.lower().replace(' ', '_') + '.png'\n return load(join(self.game.main_path, 'rec', 'items', name))",
"def get_surface_type(self) -> SurfaceTypeStr:\n return SURFACE_TYPES.inverse[self.surfaceType()]",
"def get_surface_class(self) -> SurfaceClassStr:\n return SURFACE_CLASS.inverse[self.surfaceClass()]",
"def display_cairo_surface(surface):\n b = BytesIO()\n\n surface.write_to_png(b)\n b.seek(0)\n data = b.read()\n\n ip_img = display.Image(data=data, format='png', embed=True)\n return ip_img._repr_png_()",
"def grid_to_string(grid, player):\n single_string = \"\"\n row = 0\n column = 0\n # Loop through the grid and use the cells' display attributes\n # If the current position in the grid is the player, replace it with the\n # player's display ('A')\n while row < len(grid):\n while column < len(grid[0]):\n if player.row == row and player.col == column:\n single_string += player.display\n else:\n single_string += grid[row][column].display\n column += 1\n row += 1\n column = 0\n single_string += \"\\n\"\n\n # Add on the water bucket message with the proper plural phrasing\n buckets = player.num_water_buckets\n s = 's'\n if buckets == 1:\n s = ''\n single_string += f\"\\nYou have {buckets} water bucket{s}.\"\n return single_string",
"def to_str(self):\n return u\"Superellipse[{:.4g},{:.4g}]\".format(self.alpha0.l, self.alpha0.r)",
"def cam_to_string(cam):\n cam_string = (\"near;{:8f}\\n\").format(cam.nearPlane)\n cam_string += (\"far;{:8f}\\n\").format(cam.farPlane)\n cam_string += (\"focal_length;{:8f}\\n\".format(cam.projectionMatrix[0][0]))\n cam_string += (\"fov;{}\").format(cam.fov)\n return cam_string",
"def convert_layers_to_string(layers: list) -> str:\n string_conversion = \"\"\n for layer in layers:\n string_conversion += \"\\n\" + \"\".join(layer)\n return string_conversion",
"def draw_text(screen, font, text, surfacewidth, surfaceheight):\n\tfw, fh = font.size(text) # fw: font width, fh: font height\n\tsurface = font.render(text, True, (0, 0, 255))\n\t# // makes integer division in python3 \n\tscreen.blit(surface, (0,0))",
"def get_movie_texture():\n\n global surface\n global surface_file\n\n playing = renpy.audio.music.get_playing(\"movie\")\n\n pss = renpy.audio.audio.pss\n\n if pss:\n size = pss.movie_size()\n else:\n size = (64, 64)\n\n if (surface is None) or (surface.get_size() != size) or (surface_file != playing):\n surface = renpy.display.pgrender.surface(size, False)\n surface_file = playing\n surface.fill((0, 0, 0, 255))\n\n tex = None\n\n if playing is not None:\n renpy.display.render.mutated_surface(surface)\n tex = renpy.display.draw.load_texture(surface, True)\n\n return tex",
"def toSurface(self):\n surface = pygame.Surface((self.getWidth(), self.getHeight()), 0, 8)\n surface.fill((255, 255, 255))\n black = surface.map_rgb((0, 0, 0))\n mapdata = pygame.PixelArray(surface)\n for pos in self._invalidPositions:\n try:\n mapdata[pos[0]][pos[1]] = black\n except:\n pass\n return surface",
"def surface(self):\n return self._surface",
"def makeSurf(self):\n layer = None\n\n if self.color is not None:\n layer = pygame.Surface(self.destSurf.get_size(), flags=pygame.HWSURFACE)\n layer = layer.convert()\n layer.fill(self.color)\n else:\n layer = pygame.Surface(self.destSurf.get_size(), flags=pygame.SRCALPHA|pygame.HWSURFACE, depth=32)\n layer.convert_alpha()\n layer.fill((0, 0, 0, 0))\n\n return layer",
"def goify(self, layout=None):\n xx,yy,zz = self.getXYZ(layout)\n surf = dict(\n type='surface',\n x=xx,\n y=yy,\n z=zz\n )\n return surf",
"def _rgb_to_string(rgb_tup: tuple, alpha: int = 1) -> str:\n return f\"rgba({', '.join(map(str, rgb_tup))}, {alpha})\"",
"def draw(self, surface):\n\t\tangle = self.direction.angle_to(UP) # Translates spaceship's direction into rotation angle in degrees\n\t\trotated_surface = rotozoom(self.sprite, angle, 1.0) # Rotates the sprite. Last arg is scale change, hence 1.0\n\t\trotated_surface_size = Vector2(rotated_surface.get_size())\n\t\tblit_position = self.position - rotated_surface_size * 0.5 # Blit position calculated based on rotated surface size, which differs from original size\n\t\tsurface.blit(rotated_surface, blit_position)",
"def surface_type(self):\n surf_type = BRepAdaptor_Surface(self.topods_shape()).GetType()\n if surf_type == GeomAbs_Plane:\n return \"plane\"\n if surf_type == GeomAbs_Cylinder:\n return \"cylinder\"\n if surf_type == GeomAbs_Cone:\n return \"cone\"\n if surf_type == GeomAbs_Sphere:\n return \"sphere\"\n if surf_type == GeomAbs_Torus:\n return \"torus\"\n if surf_type == GeomAbs_BezierSurface:\n return \"bezier\"\n if surf_type == GeomAbs_BSplineSurface:\n return \"bspline\"\n if surf_type == GeomAbs_SurfaceOfRevolution:\n return \"revolution\"\n if surf_type == GeomAbs_SurfaceOfExtrusion:\n return \"extrusion\"\n if surf_type == GeomAbs_OffsetSurface:\n return \"offset\"\n if surf_type == GeomAbs_OtherSurface:\n return \"other\"\n return \"unknown\"",
"def Texture(self):\n s = self.texture\n assert s in range(1,6), \"Texture score out of bounds.\"\n if s == 1: return 'Non-Solid / Ground Glass Opacity Texture'\n elif s == 2: return 'Non-Solid or Mixed Texture'\n elif s == 3: return 'Part Solid or Mixed Texture'\n elif s == 4: return 'Mixed or Solid Texure'\n elif s == 5: return 'Solid Texture'",
"def __repr__(self):\n s = \"\"\n for y in range(0,HEIGHT):\n temp=\"\"\n for x in range(0,WIDTH):\n temp = temp+ str(self.gameState[x,y])\n s += temp+\"\\n\"\n return s",
"def _new_spyral_surface(size):\n return pygame.Surface((int(size[0]),\n int(size[1])),\n pygame.SRCALPHA, 32).convert_alpha()",
"def surface_type(self) -> typing.Union[None, str]:\n surface_type = self.data[3]\n surface_type = re.findall(r'RWY surface: (.+)', surface_type)\n return surface_type[0] if surface_type else None",
"def __str__(self):\n s=\"\"\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n s+=str(self.gameState[x,y])\n return s",
"def __repr__(self) -> str:\n argument_dict = {\n \"T_e\": self.T_e,\n \"n_e\": self.n_e,\n \"particle\": self.particle,\n \"Z\": self.Z,\n }\n\n return code_repr.call_string(PlasmaBlob, (), argument_dict)",
"def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription",
"def cvimage_to_pygame( image ):\n #image_rgb = cv.CreateMat(image.height, image.width, cv.CV_8UC3)\n #cv.CvtColor(image, image_rgb, cv.CV_BGR2RGB)\n return pygame.image.frombuffer( image.tostring(), cv.GetSize( image ), \"P\" )",
"def draw(self, surface):\n temp = pygame.Surface(self.renderer.pixel_size)\n self.renderer.render_map(temp)\n pygame.transform.smoothscale(temp, surface.get_size(), surface)",
"def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)",
"def draw(self, surface):\n ent = self.controller.entity_selection\n\n # If we have not selected an entity.\n if not ent:\n self.surface.blit(self.background, (0, 0))\n self.controller.entity_selection_track = False\n return\n \n # And provide details about the unit.\n unit_text = self.font.render(\"%s (id: %s)\" % (ent.name, ent.id), True, (255, 255, 255))\n w, _ = unit_text.get_size()\n self.surface.blit(unit_text, ((self.width / 2) - w / 2, 15))\n \n output = [\"Location: (%d, %d)\" % tuple(ent.location)]\n\n if ent.name == \"ant\":\n output.append(\"Energy: %s\" % ent.c[\"attrs\"][\"energy\"])\n output.append(\"Health: %s\" % ent.c[\"attrs\"][\"health\"])\n output.append(\"Brain state: %s\" % ent.brain.active_state.name)\n output.append(\"Speed: %d\" % ent.c[\"velocity\"].speed)\n if ent.c[\"destination\"].location:\n output.append(\"Destination: (%s, %s)\" % tuple(ent.c[\"destination\"].location))\n if ent.c[\"destination\"].isentity:\n output.append(\"Target: (%s)\" % ent.c[\"destination\"].val.name)\n \n for i, line in enumerate(output):\n text = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text, (10, 30 + i*15))\n \n # Blit to the main surface.\n surface.blit(self.surface, ((self.x, self.y)))"
]
| [
"0.6516572",
"0.6308944",
"0.59656525",
"0.5701781",
"0.552377",
"0.5499506",
"0.54991174",
"0.54027945",
"0.53784376",
"0.5358556",
"0.53564054",
"0.5315486",
"0.5277987",
"0.5269198",
"0.526664",
"0.5256117",
"0.52520627",
"0.52384955",
"0.52231586",
"0.5221528",
"0.5211215",
"0.5194983",
"0.5190656",
"0.5189817",
"0.5184409",
"0.5165874",
"0.516569",
"0.5125685",
"0.5107312",
"0.51004744"
]
| 0.9001708 | 0 |
Convert a pygame surface into a cv image | def pygame_to_cvimage( surface ):
    # Legacy OpenCV 1.x API: build an 8-bit, 3-channel IplImage header, then attach
    # the surface's raw RGB bytes to it via SetData below.
    cv_image = cv.CreateImageHeader( surface.get_size(), cv.IPL_DEPTH_8U, 3 )
image_string = surface_to_string( surface )
cv.SetData( cv_image, image_string )
return cv_image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cvimage_to_pygame( image ):\n #image_rgb = cv.CreateMat(image.height, image.width, cv.CV_8UC3)\n #cv.CvtColor(image, image_rgb, cv.CV_BGR2RGB)\n return pygame.image.frombuffer( image.tostring(), cv.GetSize( image ), \"P\" )",
"def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1",
"def surface_to_string( surface ):\n return pygame.image.tostring( surface, 'RGB' )",
"def surface2im(u, v):\n kernel_x = torch.tensor(\n [[[0, 0, 0], [1, 0, -1], [0, 0, 0]]]\n )\n kernel_y = torch.tensor(\n [[[0, 1, 0], [0, 0, 0], [0, -1, 0]]]\n )\n def kernelize(kernel):\n kernel = torch.reshape(kernel, (1, 1, 3, 3))\n kernel = kernel.double()\n return kernel\n kernel_x = kernelize(kernel_x)\n kernel_y = kernelize(kernel_y)\n\n surf_x = torch.nn.functional.conv2d(u, kernel_x)\n surf_y = torch.nn.functional.conv2d(u, kernel_y)\n img = (\n (v[0] * surf_x + v[1] * surf_y - v[2])\n / (torch.sqrt(1 + surf_x**2 + surf_y**2) * (v[0]**2 * v[1]**2 * v[2]**2)**.5)\n )\n return img",
"def convertDepthFrame(self):\n try:\n\n \"\"\" \n Convert Depth frame to rudimentary colormap\n \"\"\"\n self.DepthHSV[...,0] = self.currentDepthFrame\n self.DepthHSV[...,1] = 0x9F\n self.DepthHSV[...,2] = 0xFF\n self.DepthCM = cv2.cvtColor(self.DepthHSV,cv2.COLOR_HSV2RGB)\n cv2.drawContours(self.DepthCM,self.block_contours,-1,(0,0,0),3)\n\n img = QImage(self.DepthCM,\n self.DepthCM.shape[1],\n self.DepthCM.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def convertDepthFrame(self):\n try:\n \"\"\" \n Convert Depth frame to rudimentary colormap\n \"\"\"\n self.DepthHSV[...,0] = self.currentDepthFrame\n self.DepthHSV[...,1] = 0x9F\n self.DepthHSV[...,2] = 0xFF\n self.DepthCM = cv2.cvtColor(self.DepthHSV,cv2.COLOR_HSV2RGB)\n cv2.drawContours(self.DepthCM,self.block_contours,-1,(0,0,0),3)\n cv2.drawContours(self.DepthCM,self.block_contours_2,-1,(0,0,0),3)\n cv2.drawContours(self.DepthCM,self.block_contours_3,-1,(0,0,0),3)\n\n img = QImage(self.DepthCM,\n self.DepthCM.shape[1],\n self.DepthCM.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def get_image(data):\n global g_depth\n # https://stackoverflow.com/questions/12569452/how-to-identify-numpy-types-in-python\n if isinstance(data, np.ndarray):\n # https://www.learnopencv.com/applycolormap-for-pseudocoloring-in-opencv-c-python/\n if g_danger_binary_image:\n img = np.array(depth2danger(data / 1000, g_depth_params) * 255, dtype=np.uint8)\n im_color = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n else:\n img = np.array(np.minimum(255*40, data)/40, dtype=np.uint8)\n im_color = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n\n # https://stackoverflow.com/questions/19306211/opencv-cv2-image-to-pygame-image\n image = pygame.image.frombuffer(im_color.tostring(), im_color.shape[1::-1], \"RGB\")\n g_depth = data\n elif isinstance(data, tuple):\n img_data, depth_data = data\n image = pygame.image.load(io.BytesIO(img_data), 'JPG').convert()\n g_depth = decompress_depth(depth_data)\n elif isinstance(data, list):\n # image stereo artefact localization\n # expects localized pair of images [camera_name, [robot_pose, camera_pose, image], [robot_pose, camera_pose, image]]\n assert len(data) == 3, len(data)\n image = pygame.image.load(io.BytesIO(data[1][2]), 'JPG').convert()\n elif data is not None:\n image = pygame.image.load(io.BytesIO(data), 'JPG').convert()\n else:\n image = None\n return image, (None if g_depth is None else depth_map(g_depth))",
"def blit_img(surface, img):\n if img is None:\n return\n\n mode = img.mode\n size = img.size\n data = img.tobytes()\n\n py_image = pygame.image.fromstring(data, size, mode)\n py_image = pygame.transform.scale(py_image, (surface.get_size()))\n\n surface.blit(py_image, (0, 0))",
"def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame",
"def setupuv(rc):\n if cv is not None:\n (r,c) = rc\n u = cv.CreateMat(r, c, cv.CV_32FC1)\n v = cv.CreateMat(r, c, cv.CV_32FC1)\n return (u, v)\n else:\n return [None]*2",
"def makeSurf(self):\n layer = None\n\n if self.color is not None:\n layer = pygame.Surface(self.destSurf.get_size(), flags=pygame.HWSURFACE)\n layer = layer.convert()\n layer.fill(self.color)\n else:\n layer = pygame.Surface(self.destSurf.get_size(), flags=pygame.SRCALPHA|pygame.HWSURFACE, depth=32)\n layer.convert_alpha()\n layer.fill((0, 0, 0, 0))\n\n return layer",
"def render(self, ctx):\n image = np.zeros((self.height, self.width, 3), np.uint8)\n for shape in ctx.shapes:\n if isinstance(shape, context.Rectangle):\n x = int(shape.width / 2)\n y = int(shape.height / 2)\n rad = np.radians(shape.rotation)\n rotation = np.array([[np.cos(rad), -np.sin(rad)],\n [np.sin(rad), np.cos(rad)]])\n translation = np.array([[shape.center.x], [shape.center.y]])\n corners = np.array([[-x, x, x, -x], [y, y, -y, -y]])\n transformed_corners = rotation.dot(corners) + translation\n transformed_corners = transformed_corners.T.astype(int)\n cv2.fillPoly(image, pts=[transformed_corners],\n color=shape.color)\n elif isinstance(shape, context.Circle):\n center = (int(shape.center.x), int(shape.center.y))\n image = cv2.circle(image, center, int(shape.radius),\n color=shape.color, thickness=shape.thickness)\n elif isinstance(shape, context.Text):\n center = (int(shape.center.x), int(shape.center.y))\n image = cv2.putText(image, shape.content, center,\n cv2.FONT_HERSHEY_SIMPLEX, shape.size,\n shape.color, 3, cv2.LINE_AA)\n elif isinstance(shape, context.Image):\n file_image = cv2.imread(shape.filepath, cv2.IMREAD_UNCHANGED)\n file_image = cv2.resize(file_image, (shape.width, shape.height))\n\n y1 = int(shape.center.y - shape.height / 2)\n y2 = int(y1 + file_image.shape[0])\n x1 = int(shape.center.x - shape.width / 2)\n x2 = int(x1 + file_image.shape[1])\n\n rgba = cv2.cvtColor(file_image, cv2.COLOR_RGB2RGBA)\n alpha_s = rgba[:, :, 3] / 255.0\n alpha_l = 1.0 - alpha_s\n\n image_save = image.copy()\n for c in range(0, 3):\n try:\n image[y1:y2, x1:x2, c] = (\n alpha_s * file_image[:, :, c] +\n alpha_l * image[y1:y2, x1:x2, c])\n except ValueError:\n image = image_save\n\n self._display_frame(image)",
"def greyscale(surface):\n surface = pygame.transform.scale(surface, (8, 8))\n greyscale = pygame.Surface((8, 8))\n for y in range(surface.get_height()):\n for x in range(surface.get_width()):\n greyscale.set_at((x, y), return_average(surface.get_at((x,y))))\n return greyscale",
"def get_image(self, vehicle_id, instance_id):\n return cv2.cvtColor(cv2.imdecode(self.atlas[vehicle_id][instance_id], 1), cv2.COLOR_BGR2RGB)",
"def convertDetectFrame(self):\n \n self.processDetectFrame()\n try:\n img = QImage(self.currentDetectFrame,\n self.currentDetectFrame.shape[1],\n self.currentDetectFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def capture(self):\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n ret, img = self.vid.read()\n cv2.imwrite(\"/tmp/test.png\", img)\n return img",
"def orb():\n image = pygame.Surface([16, 16], pygame.SRCALPHA)\n pygame.draw.circle(image, colour.WHITE, (8, 8), 8, 0)\n return image",
"def draw(self, surface):\n\n\t\tsurface.blit(self.image, self.rect.topleft)",
"def show_render_face(data):\n return cv2.resize(data['render'][20:180,330:490,:],(256,256));",
"def input_image():\r\n im = cv2.imread('im7.png')\r\n return im",
"def get_image(self, frame):\n self.surface.fill((0, 0, 0, 0))\n for sprite in self.sprites:\n self.surface.blit(sprite.get_image(frame), (0, 0))\n return self.surface",
"def convert_image(rel_path_in, rel_path_out):\n #Lade Bild mit Originalmaske im Grayscale-Modus\n img = cv2.imread(rel_path_in, cv2.IMREAD_GRAYSCALE)\n #Jetzt steht in img ein 2D-Array/Matrix mit jedem Graufstufen-Wert der Pixel\n #Skaliere Pixelwerte runter\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n #Hole Pixel-Wert an aktueller Stelle\n wert = img[zeilen_index][spalten_index]\n #Falls Wert != 0 (also Pixel gehoert nicht zum Hintergrund)\n if wert != 0: # != 0 statt == 255, da auch z.B. 253er Werte in den Masken existieren... (vielleicht durch Konvertierung in anderes Format?)\n #Markiere den Pixel mit 1 statt 255\n img[zeilen_index][spalten_index]=1\n #print(img)\n #*NACHDEM* alle Pixel skaliert wurden, zeichne Umrandung der Objekte\n umrandung_zeichnen(img)\n #change_color(img, 0, 255)\n #change_color(img, 1, 0)\n #print(img)\n #Schreibe Ergebnis-Bild in uebergebene Datei\n cv2.imwrite(rel_path_out, img)",
"def get_image():\n bgr = np.frombuffer(\n stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb",
"def toSurface(self):\n surface = pygame.Surface((self.getWidth(), self.getHeight()), 0, 8)\n surface.fill((255, 255, 255))\n black = surface.map_rgb((0, 0, 0))\n mapdata = pygame.PixelArray(surface)\n for pos in self._invalidPositions:\n try:\n mapdata[pos[0]][pos[1]] = black\n except:\n pass\n return surface",
"def _new_spyral_surface(size):\n return pygame.Surface((int(size[0]),\n int(size[1])),\n pygame.SRCALPHA, 32).convert_alpha()",
"def convert_img(self):\r\n self.img = self.img.convert('RGB')",
"def display_cairo_surface(surface):\n b = BytesIO()\n\n surface.write_to_png(b)\n b.seek(0)\n data = b.read()\n\n ip_img = display.Image(data=data, format='png', embed=True)\n return ip_img._repr_png_()",
"def _create_bg_surf(self) -> Surface:\n if self.props.bg is None:\n surf = engine.create_surface(self.tsize, True)\n surf.fill(self.props.bg_color)\n return surf\n else:\n if not self.cached_background or not self.cached_background.get_size() == self.tsize:\n if self.props.bg_resize_mode == ResizeMode.AUTO:\n bg_raw = engine.surf_to_raw(self.props.bg, 'RGBA')\n pil_bg = Image.frombytes('RGBA', self.props.bg.get_size(), bg_raw, 'raw')\n pil_bg = pil_bg.resize(self.tsize, resample=LANCZOS)\n bg_scaled = engine.raw_to_surf(pil_bg.tobytes('raw', 'RGBA'), self.tsize, 'RGBA')\n self.cached_background = bg_scaled\n else:\n base = engine.create_surface(self.tsize, True)\n base.blit(self.props.bg, (0, 0))\n self.cached_background = base\n return self.cached_background",
"def update(self) -> pygame.Surface:\n return self.surface",
"def fotoXO(self):\n\t\ttry:\n\t\t\timageP = self.cam.get_image()\n\t\t\tself.image = pygame.transform.flip(imageP, True, False)\n\t\texcept:\n\t\t\tpass\n\t\treturn self.image"
]
| [
"0.7807069",
"0.64404505",
"0.64314246",
"0.6241736",
"0.6146377",
"0.6104478",
"0.5927644",
"0.5911911",
"0.5818782",
"0.58139366",
"0.57531345",
"0.5712382",
"0.56956905",
"0.5679619",
"0.5668957",
"0.5600549",
"0.55530584",
"0.55383086",
"0.5536562",
"0.5504997",
"0.55032736",
"0.5501509",
"0.54966867",
"0.54765064",
"0.54669046",
"0.5442471",
"0.54388916",
"0.54357606",
"0.5429832",
"0.54043293"
]
| 0.87929255 | 0 |
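The entry above uses the legacy OpenCV 1.x `cv` bindings (CreateImageHeader/SetData), which no longer ship with modern OpenCV. As a hedged sketch, the same pygame-surface-to-OpenCV conversion with the cv2/numpy stack could look like this; the name pygame_surface_to_cv2 and the test surface are illustrative assumptions, not part of the dataset entry.

import numpy as np
import cv2
import pygame

def pygame_surface_to_cv2(surface):
    # pygame exposes pixels as a (width, height, 3) RGB array; cv2 expects (height, width, 3) BGR.
    rgb = pygame.surfarray.array3d(surface)
    rgb = np.ascontiguousarray(np.transpose(rgb, (1, 0, 2)))
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

surface = pygame.Surface((320, 240))
surface.fill((255, 0, 0))
frame = pygame_surface_to_cv2(surface)   # numpy array usable with cv2.imwrite, cv2.cvtColor, ...
print(frame.shape)                       # (240, 320, 3)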
Convert cvimage into a pygame image | def cvimage_to_pygame( image ):
#image_rgb = cv.CreateMat(image.height, image.width, cv.CV_8UC3)
#cv.CvtColor(image, image_rgb, cv.CV_BGR2RGB)
    # "P" treats the buffer as an 8-bit paletted (single-channel) image, e.g. the output
    # of cvimage_grayscale below; use "RGB" when passing 3-channel frames.
    return pygame.image.frombuffer( image.tostring(), cv.GetSize( image ), "P" )
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pygame_to_cvimage( surface ):\n cv_image = cv.CreateImageHeader( surface.get_size(), cv.IPL_DEPTH_8U, 3 )\n image_string = surface_to_string( surface )\n cv.SetData( cv_image, image_string )\n return cv_image",
"def input_image():\r\n im = cv2.imread('im7.png')\r\n return im",
"def convert_image(self, ros_img):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(ros_img, \"bgr8\")\n return cv_image\n except CvBridgeError as e:\n print(e)",
"def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame",
"def load_image():\n return cv2.imread('test.png')\n pass",
"def camera_cb(self, msg):\n #rospy.loginfo(\"Received new image\")\n\n try:\n image = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n except CvBridgeError as e:\n rospy.logerr(e)\n return\n\n self.image = cv2.flip(image, -1)",
"def cvtImage(self, ros_image):\n try:\n self.cv_image = self.bridge.imgmsg_to_cv2(ros_image, \"bgr8\")\n # self.cv_image_copy = self.cv_image.copy()\n\n except CvBridgeError as e:\n print(e)",
"def convert_image(rel_path_in, rel_path_out):\n #Lade Bild mit Originalmaske im Grayscale-Modus\n img = cv2.imread(rel_path_in, cv2.IMREAD_GRAYSCALE)\n #Jetzt steht in img ein 2D-Array/Matrix mit jedem Graufstufen-Wert der Pixel\n #Skaliere Pixelwerte runter\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n #Hole Pixel-Wert an aktueller Stelle\n wert = img[zeilen_index][spalten_index]\n #Falls Wert != 0 (also Pixel gehoert nicht zum Hintergrund)\n if wert != 0: # != 0 statt == 255, da auch z.B. 253er Werte in den Masken existieren... (vielleicht durch Konvertierung in anderes Format?)\n #Markiere den Pixel mit 1 statt 255\n img[zeilen_index][spalten_index]=1\n #print(img)\n #*NACHDEM* alle Pixel skaliert wurden, zeichne Umrandung der Objekte\n umrandung_zeichnen(img)\n #change_color(img, 0, 255)\n #change_color(img, 1, 0)\n #print(img)\n #Schreibe Ergebnis-Bild in uebergebene Datei\n cv2.imwrite(rel_path_out, img)",
"def cam_callback(msg):\n #cam_window_name = \"Baxter Video Feed\"\n bridge = CvBridge() #instantiate CvBridge\n img_bgr = bridge.imgmsg_to_cv2(msg, \"bgr8\") #ROS Image msg to OpenCV2\n self.img = img_bgr",
"def convertDepthFrame(self):\n try:\n\n \"\"\" \n Convert Depth frame to rudimentary colormap\n \"\"\"\n self.DepthHSV[...,0] = self.currentDepthFrame\n self.DepthHSV[...,1] = 0x9F\n self.DepthHSV[...,2] = 0xFF\n self.DepthCM = cv2.cvtColor(self.DepthHSV,cv2.COLOR_HSV2RGB)\n cv2.drawContours(self.DepthCM,self.block_contours,-1,(0,0,0),3)\n\n img = QImage(self.DepthCM,\n self.DepthCM.shape[1],\n self.DepthCM.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def convert_img(self):\r\n self.img = self.img.convert('RGB')",
"def get_image(self, vehicle_id, instance_id):\n return cv2.cvtColor(cv2.imdecode(self.atlas[vehicle_id][instance_id], 1), cv2.COLOR_BGR2RGB)",
"def convertDepthFrame(self):\n try:\n \"\"\" \n Convert Depth frame to rudimentary colormap\n \"\"\"\n self.DepthHSV[...,0] = self.currentDepthFrame\n self.DepthHSV[...,1] = 0x9F\n self.DepthHSV[...,2] = 0xFF\n self.DepthCM = cv2.cvtColor(self.DepthHSV,cv2.COLOR_HSV2RGB)\n cv2.drawContours(self.DepthCM,self.block_contours,-1,(0,0,0),3)\n cv2.drawContours(self.DepthCM,self.block_contours_2,-1,(0,0,0),3)\n cv2.drawContours(self.DepthCM,self.block_contours_3,-1,(0,0,0),3)\n\n img = QImage(self.DepthCM,\n self.DepthCM.shape[1],\n self.DepthCM.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def opencv_to_pil(opencv_image):\n # Convert RGB to BGR\n opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)\n pil_image = Image.fromarray(opencv_image)\n\n return pil_image",
"def PreProcessing(image):\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\timage = cv2.resize(image, (300, 300))\n\t# type conversion to UINT8\n\timage = image.astype(np.uint8).copy()\n\treturn image",
"def import_image():\n img = cv2.imread(\"resources/lena.png\")\n\n cv2.imshow(\"Output\", img)\n cv2.waitKey(0)",
"def convert_image(img):\n def dodgeV2(x, y): # dodging and merging\n return cv2.divide(x, 255 - y, scale=256)\n # convert to grey\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # bitwising\n img_invert = cv2.bitwise_not(img_gray)\n # smoothing and scaling\n img_smoothing = cv2.GaussianBlur(img_invert, (27, 27),sigmaX=-1.0, sigmaY=-1.0) # blurring by applying Gaussian filter to the inverted image\n final_img = dodgeV2(img_gray, img_smoothing)\n # adjust the shape and return\n pp_image= np.stack([final_img,final_img,final_img],axis=-1)\n return pp_image",
"def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.camera_feed.setPixmap(qt_img)",
"def get_image(data):\n global g_depth\n # https://stackoverflow.com/questions/12569452/how-to-identify-numpy-types-in-python\n if isinstance(data, np.ndarray):\n # https://www.learnopencv.com/applycolormap-for-pseudocoloring-in-opencv-c-python/\n if g_danger_binary_image:\n img = np.array(depth2danger(data / 1000, g_depth_params) * 255, dtype=np.uint8)\n im_color = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n else:\n img = np.array(np.minimum(255*40, data)/40, dtype=np.uint8)\n im_color = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n\n # https://stackoverflow.com/questions/19306211/opencv-cv2-image-to-pygame-image\n image = pygame.image.frombuffer(im_color.tostring(), im_color.shape[1::-1], \"RGB\")\n g_depth = data\n elif isinstance(data, tuple):\n img_data, depth_data = data\n image = pygame.image.load(io.BytesIO(img_data), 'JPG').convert()\n g_depth = decompress_depth(depth_data)\n elif isinstance(data, list):\n # image stereo artefact localization\n # expects localized pair of images [camera_name, [robot_pose, camera_pose, image], [robot_pose, camera_pose, image]]\n assert len(data) == 3, len(data)\n image = pygame.image.load(io.BytesIO(data[1][2]), 'JPG').convert()\n elif data is not None:\n image = pygame.image.load(io.BytesIO(data), 'JPG').convert()\n else:\n image = None\n return image, (None if g_depth is None else depth_map(g_depth))",
"def image(self):\n return cv2.imread(self.image_path)",
"def img_to_cv2(self, image_msg):\n # rospy.loginfo(\"image is of type: \" + str(type(image_msg)))\n type_as_str = str(type(image_msg))\n if type_as_str.find('sensor_msgs.msg._CompressedImage.CompressedImage') >= 0:\n # Image to numpy array\n np_arr = np.fromstring(image_msg.data, np.uint8)\n # Decode to cv2 image and store\n return cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n elif type_as_str.find('sensor_msgs.msg._Image.Image') >= 0:\n # Use CvBridge to transform\n try:\n return self.bridge.imgmsg_to_cv2(image_msg,\n image_msg.encoding) # \"bgr8\"\n except CvBridgeError as e:\n rospy.logerr(\"Error when converting image: \" + str(e))\n return None\n else:\n rospy.logerr(\"We don't know how to transform image of type \" +\n str(type(image_msg)) + \" to cv2 format.\")\n return None",
"def plot_cv_img(input_image): \n # change color channels order for matplotlib \n plt.imshow(cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)) \n\n # For easier view, turn off axis around image \n plt.axis('off')\n plt.show()",
"def update_image(self, cv_img):\n\t\tqt_img = self.ImageEdits(cv_img)\n\t\tself.camera.setPixmap(qt_img)",
"def _ros_image_callback(self, msg: Image):\n cv2_img = self._cv_bridge.imgmsg_to_cv2(msg, \"bgr8\")\n self._telegram_updater.bot.send_photo(\n self._telegram_chat_id,\n photo=BytesIO(cv2.imencode(\".jpg\", cv2_img)[1].tobytes()),\n caption=msg.header.frame_id,\n )",
"def image(fname):\n return cv2.imread(fname)",
"def hload_cv2(filepath):\n img = cv2.imread(filepath, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n return img",
"def convert_imgpil(self, pic):\n curr_pic = cv2.cvtColor(pic, cv2.COLOR_BGR2RGBA)\n return Image.fromarray(curr_pic) # Convert image for PIL",
"def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image",
"def process(self):\n self.output_image = cv.cvtColor(self.input_image, cv.COLOR_BGR2GRAY)\n cv.COLOR_BAYER_BG2GRAY\n return self.output_image",
"def make_image(file):\n image = cv2.imread(file, 0)\n image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))\n return np.array([np.array(image), np.array([0, 0])])"
]
| [
"0.8005469",
"0.6535097",
"0.64916354",
"0.6398854",
"0.63530976",
"0.62878424",
"0.6284432",
"0.6251695",
"0.6217127",
"0.61710984",
"0.6155203",
"0.6151461",
"0.61286163",
"0.6122457",
"0.610725",
"0.6069343",
"0.6057519",
"0.6040793",
"0.6013767",
"0.6012021",
"0.59809417",
"0.5978085",
"0.5945558",
"0.5925993",
"0.5880497",
"0.5867955",
"0.5861953",
"0.5841246",
"0.58338356",
"0.5829201"
]
| 0.87341195 | 0 |
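For the cvimage_to_pygame entry above, here is a sketch of the reverse conversion with cv2/numpy rather than the legacy `cv` module; cv2_to_pygame_surface and the zero-filled test frame are assumptions for illustration.

import cv2
import numpy as np
import pygame

def cv2_to_pygame_surface(bgr_image):
    # cv2 frames are (height, width, 3) BGR; frombuffer wants row-major RGB bytes and a (width, height) size.
    rgb = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    height, width = rgb.shape[:2]
    return pygame.image.frombuffer(rgb.tobytes(), (width, height), "RGB")

frame = np.zeros((240, 320, 3), dtype=np.uint8)   # stand-in for a captured frame
surface = cv2_to_pygame_surface(frame)
print(surface.get_size())                          # (320, 240)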
Converts a cvimage into grayscale | def cvimage_grayscale( cv_image ):
grayscale = cv.CreateImage( cv.GetSize( cv_image ), 8, 1 )
cv.CvtColor( cv_image, grayscale, cv.CV_RGB2GRAY )
return grayscale | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grayscale_image(input_image):\n return cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)",
"def convert_to_gray(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n #return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"def grayscale(img):\n\tif img is None:\n\t\tprint \"Img is None\"\n\t\tsys.exit()\n\tif len(img.shape) > 2:\n\t\treturn cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\telse:\n\t\treturn img",
"def convert_to_gray_scale(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n if im.mode != \"L\":\r\n im = im.convert(\"L\")\r\n\r\n return img",
"def grayscale_image(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray_image = cv2.bitwise_not(gray_image)\n\n if DEBUG:\n cv2.imwrite(\"tmp/tmp_grayscale.png\", gray_image)\n\n return gray_image",
"def gray_scale_img(img):\n if len(img.shape) == 2:\n img_gray = img.copy()\n elif len(img.shape) == 3:\n if img.shape[2] == 1:\n img_gray = img[:, :, 0].copy()\n else:\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img_gray",
"def togray( self, img ):\n if( len(img.shape) == 2): return img\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return gray",
"def convert_to_gray(img):\n # split pixel into different b g r colors\n b, g, r = cv2.split(img)\n # calculate the gray color\n gray = 0.3 * r + 0.59 * g + 0.11 * b\n return gray",
"def rgb2grayscale(image):\r\n\r\n assert image.ndim == 3 and image.shape[2] == 3\r\n\r\n gray_image = np.dot(image, [0.2989, 0.5870, 0.1140]).astype(np.uint8)\r\n\r\n return gray_image",
"def grayscale(img, format=\"RGB\"):\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n if format == \"RGB\":\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n elif format == \"BGR\":\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n elif format == \"HSV\":\n \"\"\"\n The conversion from HSV to gray is not necessary: you already have it.\n You can just select the V channel as your grayscale image\n \"\"\"\n return img[:, :, 2]",
"def rgb2gray(img):\r\n return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]",
"def to_grayscale(self):\n if len(self.img.shape) in (3, 4):\n gray = opencv.cvtColor(self.img, opencv.COLOR_BGR2GRAY)\n return Image(gray)\n else:\n assert len(self.img.shape) == 2\n return Image(self.img)",
"def to_grayscale(image):\n # Credit sastanin (https://stackoverflow.com/a/3935002)\n if len(image.shape) == 3:\n return np.average(image, -1) # average the last axis (color channels)\n else:\n return image",
"def imgfile_to_grayscale(filename):\n img = cv2.imread(filename)\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)"
]
| [
"0.8324178",
"0.82224435",
"0.8202237",
"0.8202237",
"0.8202237",
"0.8202237",
"0.81656337",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.79627913",
"0.7926543",
"0.79139477",
"0.7773947",
"0.7692095",
"0.76902056",
"0.7613483",
"0.7552787",
"0.7548144",
"0.74625444",
"0.7420254",
"0.7380352",
"0.732321",
"0.7302684"
]
| 0.86633444 | 0 |
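The cvimage_grayscale entry relies on the removed CreateImage/CvtColor API; a minimal cv2 counterpart (names here are illustrative, not part of the entry) would be:

import cv2
import numpy as np

def cv2_grayscale(bgr_image):
    # One call replaces the CreateImage + CvtColor pair from the legacy API; cv2 frames are BGR by convention.
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

frame = np.full((240, 320, 3), 128, dtype=np.uint8)   # stand-in frame
gray = cv2_grayscale(frame)
print(gray.shape, gray.dtype)                          # (240, 320) uint8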
Return the last gfxinfo dump from the frame collector's raw output. | def gfxinfo_get_last_dump(filepath):
record = ''
with open(filepath, 'r') as fh:
fh_iter = _file_reverse_iter(fh)
try:
while True:
buf = next(fh_iter)
                # Found the start-of-dump marker; everything from here to the end of the file is the last dump.
                ix = buf.find('** Graphics')
if ix >= 0:
return buf[ix:] + record
                # The '** Graphics ... **' header line may straddle a block boundary; if only its
                # tail (' **') is in this block, pull in the preceding block and search again.
                ix = buf.find(' **\n')
if ix >= 0:
buf = next(fh_iter) + buf
ix = buf.find('** Graphics')
if ix < 0:
msg = '"{}" appears to be corrupted'
raise RuntimeError(msg.format(filepath))
return buf[ix:] + record
record = buf + record
except StopIteration:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def frame(self):\n try:\n AppHelper.runConsoleEventLoop(installInterrupt=True)\n return str(self._delegate.frame.representations()[0].TIFFRepresentation().bytes())\n except:\n return None",
"def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1",
"def get_frame(self):\n return self.last_frame",
"def captureimage(self):\n if not self.total_time:\n return self.frames[-1]\n return None",
"def getFrame(self):\n s, image = self.capture.read()\n return image",
"def grab_frame(self):\n with self._buflock:\n if self._buffer is None:\n return None\n buf = self._buffer.tostring()\n return buf",
"def read(self):\r\n frame = self.last_frame\r\n return frame",
"def get_last_save_info(self) -> Any:\n return self._bin_iter.get_last_save_info()",
"def output_frame(self):\n if self._pipeline:\n frame = self._pipeline[-1].frame\n if not isinstance(frame, str):\n frame = frame.name\n return getattr(self, frame)\n else:\n return None",
"def screenshot_base(self, previous):\n\tif self.internal.device.google_experience or not getattr(\n\t\t\tself.internal.settings, 'LV_USE_FRAMEBUFFER_COMMAND', False):\n\t\treturn previous(self)\n\n\treturn BytesIO(binascii.a2b_hex(''.join(\n\t\t\tself.internal.transport.view_server_query('FRAMEBUFFER\\n',\n\t\t\t\t\t2*self.internal.settings.INTERNAL_VIEW_SERVER_QUERY_DEFAULT_TIMEOUT))))",
"def dump_full_game(self):\n\n if self.state != STATE_GAME_FINISHED:\n raise Exception('Game is not finished, cannot dump full infos.')\n\n return self.game_log",
"def get_latest_valid_picture(self):\n return self.buffer[self.buffer_index]",
"async def _retrieve_frame(self, mode: BufferRetrieveMode) -> RawArray:",
"def get_info(self):\n\t\tret = 'Flash info\\n'\n\t\tret += '\\tGPNVM bits: ' + str(self.read_gpnvm()) + '\\n'\n\t\tret += '\\tUnique identifier area: ' + self.read_unique_identifier_area().decode('ascii', 'replace') + '\\n'\n\t\tret += '\\tDescriptor: ' + str(self.read_descriptor()) + '\\n'\n\t\treturn ret",
"def read_frame(self):\n return self.decode_frame(self.grab_frame())",
"def _GetSurfaceFlingerFrameData(self):\n window_name = self._GetSurfaceViewWindowName()\n command = ['dumpsys', 'SurfaceFlinger', '--latency']\n # Even if we don't find the window name, run the command to get the refresh\n # period.\n if window_name:\n command.append(window_name)\n output = self._device.RunShellCommand(command, check_return=True)\n return ParseFrameData(output, parse_timestamps=bool(window_name))",
"def get_raw_output(self):\n return self._engine.get_raw_output()",
"def grabFrame(self):\r\n \r\n data, w, h, orientation = self.grabRawFrame()\r\n return Image.fromstring(\"RGB\", (w, h), data, \"raw\", \"BGR\", 0, orientation)",
"def get_full_output(self):\n if self.full_output:\n return self.full_output",
"def get_still(self):\n _, frame = self.client.read()\n return frame",
"def screen_shot():\n screen_shot_string_io = StringIO.StringIO()\n ImageGrab.grab().save(screen_shot_string_io, \"PNG\")\n screen_shot_string_io.seek(0)\n return screen_shot_string_io.read()",
"def get_new_image(self):\n img = self.vid_mem_reader.get_latest_image()\n if not img: \n return None\n img = img[0]\n return convert_16to8(img)",
"def _getframe(depth=None): # real signature unknown; restored from __doc__\n pass",
"def get_frame(self):\n return opencv.highgui.cvQueryFrame(self.capture)",
"def curframe(self):\n return self._stack[self._curframe_index][0]",
"def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame",
"def current_frame(self) -> str:\n return self.frames[self.frame]",
"def frame_info( self ):\n return self.__frame_id, int(self.__frame_timestamp)",
"def most_recent_damaged_pos(self):\n\n idx = len(self.oxygen_profile) - self.count - 1\n if idx < 0:\n idx = 0\n return (self.oxygen_cards[idx].render_pos[0] + self.pos[0],\n self.oxygen_cards[idx].render_pos[1] + self.pos[1])",
"def frame(self):\n return self._frame"
]
| [
"0.64934605",
"0.61334354",
"0.6007916",
"0.5989006",
"0.5923391",
"0.58175516",
"0.5735115",
"0.5702237",
"0.5687114",
"0.56412464",
"0.5596677",
"0.55304444",
"0.54142034",
"0.53991956",
"0.539123",
"0.5387218",
"0.53756607",
"0.5365995",
"0.5358634",
"0.5358058",
"0.53229886",
"0.5311527",
"0.52748704",
"0.5260412",
"0.52525294",
"0.52511305",
"0.5235055",
"0.5228518",
"0.52266467",
"0.51930106"
]
| 0.70381945 | 0 |
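gfxinfo_get_last_dump above depends on a _file_reverse_iter helper that is not included in this entry. One possible stand-in, an assumption rather than the original helper, yields the file content in fixed-size blocks starting from the end:

def _file_reverse_iter(fh, block_size=4096):
    # Read the whole file once and yield block_size chunks from the end toward the start.
    # Fine for gfxinfo captures, which are small; a seek-based version would suit huge files better.
    data = fh.read()
    for end in range(len(data), 0, -block_size):
        yield data[max(0, end - block_size):end]

# Hypothetical capture file holding repeated `dumpsys gfxinfo` output.
last_dump = gfxinfo_get_last_dump('gfxinfo_raw.txt')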
Return the instantaneous average velocity averaged over all cars | def global_average_speed(cars):
velocities = [car.velocity for car in cars]
average_speed = sum(velocities)/len(cars)
return average_speed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def class_average_speed(cars):\n # Sort by class name\n class_sorted = sorted(cars, key=lambda car: type(car).__name__)\n class_velocities = []\n class_names = []\n # Group the cars of same class and average their velocities, save class names\n for key, group in groupby(cars, key=lambda car: type(car).__name__):\n velocities = [car.velocity for car in group]\n class_velocity = sum(velocities) / len(velocities)\n class_velocities.append([class_velocity])\n class_names.append(key)\n return class_velocities, class_names",
"def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)",
"def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg",
"def average(cls, vectors):\n return cls.sum(vectors) / len(vectors)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn",
"def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy",
"def avg_data_each_h(nof, lof, x_vel, y_vel, z_vel):\n sx_vel = []\n sy_vel = []\n sz_vel = []\n\n for i in range(lof):\n sx = 0\n sy = 0\n sa = 0\n for j in range(nof):\n sx += x_vel[i + (j * nof)]\n sy += y_vel[i + (j * nof)]\n sa += z_vel[i + (j * nof)]\n\n sx_vel.append(sx)\n sy_vel.append(sy)\n sz_vel.append(sa)\n\n # checks lengths match and then averages them and returns the average velocities\n if len(sx_vel) == len(sy_vel) and len(sx_vel) == len(sz_vel):\n if len(sx_vel) == lof:\n ax_vel = np.array(sx_vel) / nof\n ay_vel = np.array(sy_vel) / nof\n az_vel = np.array(sz_vel) / nof\n\n return ax_vel, ay_vel, az_vel\n else:\n print \"Error: summed velocity array is the wrong length!\"\n else:\n print \"Error: summed velocity data not matching!\"",
"def averageTime(self):\n \n pass",
"def average(self, times=2):\n for i in range(times):\n self.statistics()\n global t, avlist\n length = len(t)\n avlist.append(t)\n t = []\n\n total_list = []\n\n for l in range(length):\n total_list.append([])\n\n for j in range(times):\n \"\"\"per time\"\"\"\n for i in range(length):\n total_list[i].append(avlist[j][i])\n\n \"\"\"calculate\"\"\"\n ylist = []\n avlist = []\n for a in total_list:\n avg = 0\n for b in a:\n avg += b\n ylist.append(avg/times)\n self.listy = ylist\n\n for e in range(self.el[self.re[0]], self.re[1], self.re[2]):\n self.listx.append(e)",
"def average_speed(self):\n for s in self.speeds:\n self.speed_x += s.x\n self.speed_y += s.y\n\n self.average_speed_x = self.speed_x / len(self.speeds)\n self.average_speed_y = self.speed_y / len(self.speeds)\n return Vec2d(self.average_speed_x, self.average_speed_y)",
"def calcDVavg(supplyvol, demandvol):\n dvavg = (supplyvol - demandvol)/(0.5 * (supplyvol + demandvol))\n return dvavg",
"def get_average_survival(self):\n return np.mean(self.survival_rates)",
"def average(self):\n return self.summation() / self.count()",
"def calc_10M_average(self, inputs):\n self.Vm_sum += self.getAtt('Vm', inputs)\n if self.time % 10 == 0:\n if self.time == 0:\n average = self.Vm_sum / 2\n else:\n average = self.Vm_sum / 10\n self.Vm_10M_average = average\n self.Vm_sum = 0.0",
"def avgtr(self):\n return np.diff(self.trtimes).mean()",
"def average_speed(self):\n return self._average_speed",
"def average(self):\n if self._average is None: # only first time\n self._average = self._obj.mean(dim='t')\n self._average.attrs = self._obj.attrs # we need units in quiver\n\n return self._average",
"def average_speed(self): # pylint: disable=no-self-use\n query = read_sql(\"ave_speed.sql\")\n ave_speed_df = get_dataframe_from_bigquery(query, multipart=True)\n ave_speed_df.pipe(save_to_gcs, settings.ASSETS.FILES.AVESPEED)",
"def mean_vol(df):\n return df.tail(5)['volume'].mean(), df.tail(20)['volume'].mean()",
"def average_speed(self):\n return self.total_distance * 3600 / self.total_time",
"def avg_temps(self):\r\n average_temp = 0\r\n for j in range(len(self.trip)):\r\n average_temp += self.trip[j].get_temperature(j)\r\n average_temp /= len(self.trip)\r\n return average_temp",
"def average_concentration():\n x10 = 0 # HCl gas fraction on the droplet surface, [/]\n x30 = pwater / pre # water vapour fraction in the pipe, [/]\n x11 = phcl / pre # HCl gas fraction in the pipe, [/]\n x31 = 12.3e-3 # water vapour fraction on the droplet surface, [/]\n if x30 < 0.02 and x31 < 0.02:\n x30 = 0.0\n x31 = 0.0 # the fraction is pretty low, thus neglect the water part\n x20 = 1 - x10 - x30 # other gas fraction on the droplet surface, [/]\n x21 = 1 - x11 - x31 # other gas fraction in the pipe, [/]\n x1d = x10 - x11 # HCl fraction difference, [/]\n x1_bar = (x10 + x11) / 2 # HCl average fraction, [/]\n x2d = x20 - x21 # Natural gas fraction difference, [/]\n x2_bar = (x20 + x21) / 2 # natural gas average fraction, [/]\n x3d = x30 - x31 # water fraction difference, [/]\n x3_bar = (x30 + x31) / 2 # water average fraction, [/]\n return x1d, x1_bar, x2d, x2_bar, x3d, x3_bar",
"def aver_and_var(self):\n # assert not self.is_empty\n\n for axis in range(3):\n c1, c2 = self.bounds[axis]\n w = self.n_pix_partial[axis]\n aver = np.average(np.arange(c1, c2), weights=w)\n var = np.average(np.arange(c1, c2)**2, weights=w) - aver ** 2 # D = E(X^2) - (EX)^2\n yield aver, var",
"def average(data):\n return np.average(data)",
"def mean(self):\n return self.vmean",
"def average(self):\n return (self.current + self.last) / 2.0",
"def compute_average(vec_list):\r\n return np.sum(vec_list, axis = 0)/len(vec_list)"
]
| [
"0.7477088",
"0.66271657",
"0.6465083",
"0.64439666",
"0.64348084",
"0.64348084",
"0.64348084",
"0.6413489",
"0.6235546",
"0.6192724",
"0.61520684",
"0.61391675",
"0.6107132",
"0.60472494",
"0.6043459",
"0.6041461",
"0.60177046",
"0.6006069",
"0.6000753",
"0.592879",
"0.59059465",
"0.59028155",
"0.5893592",
"0.58802146",
"0.5850874",
"0.58359706",
"0.5812554",
"0.5788207",
"0.57776564",
"0.5771276"
]
| 0.8024266 | 0 |
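A short usage sketch for global_average_speed; the Car stand-in is an assumption, since the entry only needs objects exposing a numeric velocity attribute:

class Car:
    # Minimal stand-in: the simulation's cars only need a `velocity` attribute here.
    def __init__(self, velocity):
        self.velocity = velocity

cars = [Car(12.0), Car(15.5), Car(9.5)]
print(global_average_speed(cars))   # prints 12.333333333333334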
Return the instantaneous average velocity for each class of cars. Returns: class_velocities, a list of average velocities for each class in class_names; class_names, a list of the class names of the active cars. | def class_average_speed(cars):
# Sort by class name
class_sorted = sorted(cars, key=lambda car: type(car).__name__)
class_velocities = []
class_names = []
    # Group cars of the same class together, average their velocities, and record the class names.
    for key, group in groupby(class_sorted, key=lambda car: type(car).__name__):
velocities = [car.velocity for car in group]
class_velocity = sum(velocities) / len(velocities)
class_velocities.append([class_velocity])
class_names.append(key)
return class_velocities, class_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def global_average_speed(cars):\n velocities = [car.velocity for car in cars]\n average_speed = sum(velocities)/len(cars)\n return average_speed",
"def average(cls, vectors):\n return cls.sum(vectors) / len(vectors)",
"def get_velocity(self):\n\n vs = []\n pairs = [(-2, -1), (-3, -1), (-3, -1)]\n\n for i1, i2 in pairs:\n f1 = self.files[i1]\n p1 = Profile(os.path.join(self.name, f1))\n\n f2 = self.files[i2]\n p2 = Profile(os.path.join(self.name, f2))\n\n # we'll do this by looking at 3 different temperature\n # thresholds and averaging\n T_ref = [2.e9, 3.e9, 4.e9]\n\n for T0 in T_ref:\n x1 = p1.find_x_for_T(T0)\n x2 = p2.find_x_for_T(T0)\n vs.append((x1 - x2)/(p1.time - p2.time))\n\n vs = np.array(vs)\n v = np.mean(vs)\n v_sigma = np.std(vs)\n return v, v_sigma",
"def class_average(X):\n\t# compute average row vector\n\tmean_vector = np.mean(X, axis = 0)\n\treturn(mean_vector)",
"def calc_avg_vel(position, step_size, avg_quantity):\n avg_disp = int(math.floor(avg_quantity / 2))\n start_frame = step_size + avg_disp + 1\n end_frame = len(position) - avg_disp\n print(\"Calculating velocities from frames\", start_frame, \"to\", end_frame)\n com_vel = []\n for i in range(start_frame, end_frame + 1):\n com_vel.append(calc_avg_vel_frame(position, step_size, i, avg_quantity))\n return start_frame, end_frame, com_vel",
"def average(self, times=2):\n for i in range(times):\n self.statistics()\n global t, avlist\n length = len(t)\n avlist.append(t)\n t = []\n\n total_list = []\n\n for l in range(length):\n total_list.append([])\n\n for j in range(times):\n \"\"\"per time\"\"\"\n for i in range(length):\n total_list[i].append(avlist[j][i])\n\n \"\"\"calculate\"\"\"\n ylist = []\n avlist = []\n for a in total_list:\n avg = 0\n for b in a:\n avg += b\n ylist.append(avg/times)\n self.listy = ylist\n\n for e in range(self.el[self.re[0]], self.re[1], self.re[2]):\n self.listx.append(e)",
"def class_average(images,ref=None,niter=1,normproc=(\"normalize.edgemean\",{}),prefilt=0,align=(\"rotate_translate_flip\",{}),\n\t\taligncmp=(\"ccc\",{}),ralign=None,raligncmp=None,averager=(\"mean\",{}),scmp=(\"ccc\",{}),keep=1.5,keepsig=1,automask=0,saveali=0,verbose=0,callback=None,center=\"xform.center\"):\n\n\tif verbose>2 : print \"class_average(\",images,ref,niter,normproc,prefilt,align,aligncmp,ralign,raligncmp,averager,scmp,keep,keepsig,automask,verbose,callback,\")\"\n\n\t# nimg is the number of particles we have to align/average\n\tif isinstance(images[0],EMData) : nimg=len(images)\n\telif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1\n\telse : raise Exception,\"Bad images list (%s)\"%str(images)\n\n\tif verbose>2 : print \"Average %d images\"%nimg\n\n\t# If one image and no reference, just return it\n\tif nimg==1 and ref==None : return (get_image(images,0,normproc),[(0,Transform(),1)])\n\n\t# If one particle and reference, align and return\n\tif nimg==1:\n\t\tif averager[0]!=\"mean\" : raise Exception,\"Cannot perform correct average of single particle\"\n\t\tali=align_one(get_image(images,0,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)\n\t\ttry: ali[\"model_id\"]=ref[\"model_id\"]\n\t\texcept: pass\n\t\tsim=ali.cmp(scmp[0],ref,scmp[1])\t\t\t# compare similarity to reference (may use a different cmp() than the aligner)\n\t\treturn (ali,[(sim,ali[\"xform.align2d\"],1)])\n\n\t# If we don't have a reference image, we need to make one\n\tif ref==None :\n\t\tif verbose : print \"Generating reference\"\n#\t\tsigs=[(get_image(i)[\"sigma\"],i) for i in range(nimg)]\t\t# sigma for each input image, inefficient\n#\t\tref=get_image(images,max(sigs)[1])\n\t\tref=get_image(images,0,normproc)\t\t\t\t\t\t\t\t\t\t# just start with the first, as EMAN1\n\n\t\t# now align and average the set to the gradually improving average\n\t\tfor i in range(1,nimg):\n\t\t\tif verbose>1 :\n\t\t\t\tprint \".\",\n\t\t\t\tsys.stdout.flush()\n\t\t\tali=align_one(get_image(images,i,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)\n\t\t\tref.add(ali)\n\n\t\t# A little masking and centering\n\t\ttry:\n\t\t\tgmw=max(5,ref[\"nx\"]/16)\t\t# gaussian mask width\n\t\t\t#ref.process_inplace(\"filter.highpass.gauss\",{\"cutoff_pixels\":min(ref[\"nx\"]/10,5)})\t# highpass to reduce gradient issues\n\t\t\t#ref.process_inplace(\"normalize.circlemean\")\n\t\t\t#ref2=ref.process(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\t#ref2.process_inplace(\"filter.lowpass.gauss\",{\"cutoff_abs\":0.07})\t# highpass to reduce gradient issues\n\t\t\t#ref2.process_inplace(\"normalize.circlemean\")\n\t\t\t#ref2.process_inplace(\"threshold.binary\",{\"value\":ref[\"mean\"]+ref[\"sigma\"]*1.5})\n\t\t\t#ref2.process_inplace(\"xform.centerofmass\",{\"threshold\":0.5})\t\t\t\t\t\t# TODO: should probably check how well this works\n\t\t\t#fxf=ref2[\"xform.align2d\"]\n\t\t\t#ref.translate(fxf.get_trans())\n\t\t\t\n\t\t\tif center:\t#jesus\n\t\t\t\tref.process_inplace(center)\n\t\t\tref.process_inplace(\"normalize.circlemean\",{\"radius\":ref[\"nx\"]/2-gmw})\n\t\t\tref.process_inplace(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\tref_orient=None\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\telse:\n\t\ttry: ref_orient=ref[\"xform.projection\"]\n\t\texcept: ref_orient=None\n\n\t\ttry: ref_model=ref[\"model_id\"]\n\t\texcept: ref_model=0\n\n\tif verbose>1 : print \"\"\n\n\tinit_ref=ref.copy()\n\n\t# Iterative 
alignment\n\tptcl_info=[None]*nimg\t\t# empty list of particle info\n\n\t# This is really niter+1 1/2 iterations. It gets terminated 1/2 way through the final loop\n\tfor it in range(niter+2):\n\t\tif verbose : print \"Starting iteration %d\"%it\n\t\tif callback!=None : callback(int(it*100/(niter+2)))\n\n\t\tmean,sigma=0.0,1.0\t\t# defaults for when similarity isn't computed\n\n\t\t# Evaluate quality from last iteration, and set a threshold for keeping particles\n\t\tif it>0:\n\t\t\t# measure statistics of quality values\n\t\t\tmean,sigma=0,0\n\t\t\tfor sim,xf,use in ptcl_info:\n\t\t\t\tmean+=sim\n\t\t\t\tsigma+=sim**2\n\t\t\tmean/=len(ptcl_info)\n\t\t\tsigma=sqrt(sigma/len(ptcl_info)-mean**2)\n\n\t\t\t# set a threshold based on statistics and options\n\t\t\tif keepsig:\t\t\t\t\t# keep a relative fraction based on the standard deviation of the similarity values\n\t\t\t\tthresh=mean+sigma*keep\n\t\t\t\tif verbose>1 : print \"mean = %f\\tsigma = %f\\tthresh=%f\"%(mean,sigma,thresh)\n\t\t\telse:\t\t\t\t\t\t# keep an absolute fraction of the total\n\t\t\t\tl=[i[0] for i in ptcl_info]\n\t\t\t\tl.sort()\n\t\t\t\ttry: thresh=l[int(len(l)*keep)]\n\t\t\t\texcept:\n\t\t\t\t\tif verbose: print \"Keeping all particles\"\n\t\t\t\t\tthresh=l[-1]+1.0\n\n\t\t\tif verbose:\n\t\t\t\tprint \"Threshold = %1.4f Quality: min=%f max=%f mean=%f sigma=%f\"%(thresh,min(ptcl_info)[0],max(ptcl_info)[0],mean,sigma)\n\n\t\t\t# mark the particles to keep and exclude\n\t\t\tnex=0\n\t\t\tfor i,pi in enumerate(ptcl_info):\n\t\t\t\tif pi[0]>thresh :\n\t\t\t\t\tnex+=1\n\t\t\t\t\tptcl_info[i]=(pi[0],pi[1],0)\n\t\t\t\telif pi[2]==0:\n\t\t\t\t\tptcl_info[i]=(pi[0],pi[1],1)\n\n\t\t\tif verbose : print \"%d/%d particles excluded\"%(nex,len(ptcl_info))\n\n\t\t\t# if all of the particles were thrown out for some reason, we keep the best one\n\t\t\tif nex==len(ptcl_info) :\n\t\t\t\tbest=ptcl_info.index(min(ptcl_info))\n\t\t\t\tptcl_info[best]=(ptcl_info[best][0],ptcl_info[best][1],1)\n\t\t\t\tif verbose : print \"Best particle reinstated\"\n\n\t\tif it==niter+1 : break\t\t# This is where the loop actually terminates. 
This makes sure that inclusion/exclusion is updated at the end\n\n\t\t# Now align and average\n\t\tavgr=Averagers.get(averager[0], averager[1])\n\t\tfor i in range(nimg):\n\t\t\tif callback!=None and nimg%10==9 : callback(int((it+i/float(nimg))*100/(niter+2.0)))\n\t\t\tptcl=get_image(images,i,normproc)\t\t\t\t\t# get the particle to align\n\t\t\tali=align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp) # align to reference\n\t\t\tsim=ali.cmp(scmp[0],ref,scmp[1])\t\t\t# compare similarity to reference (may use a different cmp() than the aligner)\n\t\t\tif saveali and it==niter : ali.write_image(\"aligned.hdf\",-1)\n\n\t\t\ttry: use=ptcl_info[i][2]\n\t\t\texcept: use=1\n\t\t\tif use :\n\t\t\t\tavgr.add_image(ali)\t\t\t\t# only include the particle if we've tagged it as good\n\t\t\t\tif verbose>1 :\n\t\t\t\t\tsys.stdout.write(\".\")\n\t\t\t\t\tsys.stdout.flush()\n\t\t\telif verbose>1:\n\t\t\t\tsys.stdout.write(\"X\")\n\t\t\t\tsys.stdout.flush()\n\t\t\tptcl_info[i]=(sim,ali[\"xform.align2d\"],use)\n\n\t\tif verbose>1 : print \"\"\n\n\t\tref=avgr.finish()\n\t\tref[\"class_ptcl_qual\"]=mean\n\t\tref[\"class_ptcl_qual_sigma\"]=sigma\n\n\t\t# A little masking before the next iteration\n\t\tgmw=max(5,ref[\"nx\"]/12)\t\t# gaussian mask width\n\t\tref.process_inplace(\"normalize.circlemean\",{\"radius\":ref[\"nx\"]/2-gmw})\n\t\tif automask :\n\t\t\tref.process_inplace(\"mask.auto2d\",{\"nmaxseed\":10,\"nshells\":gmw-2,\"nshellsgauss\":gmw,\"sigma\":0.2})\n\t\telse :\n\t\t\tref.process_inplace(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\n\tif ref_orient!=None :\n\t\tref[\"xform.projection\"]=ref_orient\n\t\tref[\"model_id\"]=ref_model\n\treturn [ref,ptcl_info]",
"def class_average(examples):\n averages = np.zeros((1, attribute_count))\n\n # if we have no examples, then we'll just end early\n if len(examples) == 0:\n return averages\n\n for ex in examples:\n averages += ex.row\n\n return averages / len(examples)",
"def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn",
"def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)",
"def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot",
"def getAvgFeatureVecs(reviews, model, num_features):\n\t# Initialize a counter \n\tcounter = 0.\n\t# Preallocate a 2D numpy array, for speed\n\treviewFeatureVecs = np.zeros((len(reviews),num_features),dtype=\"float32\")\n\t\t \n\t# Loop through the reviews \n\tfor review in reviews:\n\t\t# Print a status message every 1000th review \n\t\tif counter%1000. == 0.:\n\t\t\tprint \"Review %d of %d\" % (counter, len(reviews))\n\t\t\t# \n\t\t\t# Call the function (defined above) that makes average feature vectors \n\t\treviewFeatureVecs[counter] = makeFeatureVec(review, model, num_features)\n\t\t# Increment the counter \n\t\tcounter = counter + 1.\n\treturn reviewFeatureVecs",
"def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs",
"def compute_accuracy(data, num_labels = 4): \n \n # Declarating list to store results\n accuracies = []\n \n for instance in data:\n \n # Declarating list to store individual results\n instance_accuracies = []\n \n for i in np.arange(num_labels):\n \n # Computing and storing accuracy for each class\n instance_accuracies.append(accuracy_score(instance[:, 2 + i], instance[:, 2 + i + 4]))\n \n # Storing mean results of the instance\n accuracies.append(np.mean(instance_accuracies))\n \n # Returning mean of all results\n return np.mean(accuracies)",
"def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg",
"def target_velocity(self, time):\n\n avg_vel = (self.ar_tag_pos - self.start_pos)/self.total_time\n\n return avg_vel",
"def class_average_withali(images,ptcl_info,xform,ref,averager=(\"mean\",{}),normproc=(\"normalize.edgemean\",{}),setsfref=0,verbose=0):\n\n\tif isinstance(images[0],EMData) : nimg=len(images)\n\telif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1\n\telse : raise Exception,\"Bad images list\"\n\n\tincl=[]\n\texcl=[]\n#\txforms=[]\n\tavgr=Averagers.get(averager[0], averager[1])\n\tfor i in range(nimg):\n\t\timg=get_image(images,i,normproc)\n\t\tptcl_info[i]=(ptcl_info[i][0],xform*ptcl_info[i][1],ptcl_info[i][2])\t\t# apply the new Transform to the existing one\n#\t\tptcl_info[i]=(ptcl_info[i][0],ptcl_info[i][1]*xform,ptcl_info[i][2])\t\t# apply the new Transform to the existing one\n\t\timg.process_inplace(\"xform\",{\"transform\":ptcl_info[i][1]})\n\t\ttry: use=ptcl_info[i][2]\n\t\texcept: use=1\n\t\tif use :\n\t\t\tavgr.add_image(img)\t\t\t\t# only include the particle if we've tagged it as good\n\t\t\tif img.has_attr(\"source_n\") : incl.append(img[\"source_n\"])\n#\t\t\txforms.append(ptcl_info[i][1])\n\t\telif img.has_attr(\"source_n\") : excl.append(img[\"source_n\"])\n\n\tavg=avgr.finish()\n\n\t# normalize to the reference, this should make make3dpar work better as we can skip the normalization step\n\tif ref!=None :\n\t\tif setsfref:\n\t\t\tavg.process_inplace(\"filter.matchto\",{\"to\":ref,\"interpolate\":0,\"keephires\":1})\n\t\t\tavg-=avg.get_edge_mean()\n\t\telse : avg.process_inplace(\"normalize.toimage\",{\"to\":ref})\n\n\t\tavg[\"class_qual\"]=avg.cmp(\"ccc\",ref)\n\n\t# set some useful attributes\n\tif len(incl)>0 or len(excl)>0 :\n\t\tif len(incl)>0 : avg[\"class_ptcl_idxs\"]=incl\n\t\tif len(excl)>0 : avg[\"exc_class_ptcl_idxs\"]=excl\n#\t\tif len(xforms)>0: avg[\"class_ptcl_xforms\"]=xforms\n\t\tavg[\"class_ptcl_src\"]=img[\"source_path\"]\n\n\treturn avg",
"def avg_data_each_h(nof, lof, x_vel, y_vel, z_vel):\n sx_vel = []\n sy_vel = []\n sz_vel = []\n\n for i in range(lof):\n sx = 0\n sy = 0\n sa = 0\n for j in range(nof):\n sx += x_vel[i + (j * nof)]\n sy += y_vel[i + (j * nof)]\n sa += z_vel[i + (j * nof)]\n\n sx_vel.append(sx)\n sy_vel.append(sy)\n sz_vel.append(sa)\n\n # checks lengths match and then averages them and returns the average velocities\n if len(sx_vel) == len(sy_vel) and len(sx_vel) == len(sz_vel):\n if len(sx_vel) == lof:\n ax_vel = np.array(sx_vel) / nof\n ay_vel = np.array(sy_vel) / nof\n az_vel = np.array(sz_vel) / nof\n\n return ax_vel, ay_vel, az_vel\n else:\n print \"Error: summed velocity array is the wrong length!\"\n else:\n print \"Error: summed velocity data not matching!\"",
"def averageTime(self):\n \n pass",
"def calculate_mean_average_precision(class_name='', current_neuron_index=current_neuron_index, acts=acts, verbose=verbose, minx=0.000000001):\n #\n current_neuron = acts.get_activations_for_neuron(current_neuron_index) # get the neuron's data\n x_data = current_neuron.vector # get the activations without classes\n # grab your list of points\n local_list, selected_activations = grab_points_for_a_cluster(current_neuron_index,\n min_selected_x_data=minx,\n max_selected_x_data=max(x_data),\n acts=acts,\n x_data=x_data,\n verbose=verbose)\n Q = len(local_list) # total length of list\n # get the test class (this is the correct class or 'A')\n if class_name == '':\n test_class = local_list[-1][0]\n else:\n test_class = class_name\n # set up counters\n MAP = 0 # mean average precision\n count_of_test_class = 0\n # loop backwards through the list, abs j is the position in a 1-indexed list\n for i in range(Q+1):\n j = -(i + 1) # 1 indexed\n current_class = local_list[j][0] # current class\n if j == -Q:\n # if the whole of local_list is the same class (this accounts for zero indexing)\n if verbose:\n print(current_class)\n print('{}/{}'.format(count_of_test_class, abs(j)))\n j = j -1 # really this is here so we can check j\n break\n if (current_class == test_class):\n count_of_test_class = count_of_test_class + 1\n MAP = MAP + count_of_test_class/(abs(j)) # N.b. this is the sum, we divide by j on the output\n return MAP/Q",
"def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]",
"def get_car_speeds(self):\n car_speeds = []\n for car in self.car_list:\n car_speeds.append(car.current_speed)\n return car_speeds",
"def avg_variables(ds1, ds2, lat, lon, z, p):\r\n \r\n T1 = ds1.temp.mean(dim='time').mean(dim='lon')\r\n T2 = ds2.temp.mean(dim='time').mean(dim='lon')\r\n T_avg = average(T1, T2, z, lat, 'lat', 'pfull', 'K')\r\n \r\n uz1 = ds1.ucomp.mean(dim='time').mean(dim='lon')\r\n uz2 = ds2.ucomp.mean(dim='time').mean(dim='lon')\r\n uz_avg = average(uz1, uz2, z, lat, 'lat', 'pfull', 'm/s')\r\n \r\n msf1 = v(ds1, p, lat)\r\n msf2 = v(ds2, p, lat)\r\n msf_avg = average(msf1, msf2, z, lat, 'lat', 'pfull', 'kg/s')\r\n \r\n return T_avg, uz_avg, msf_avg",
"def get_average_specs(route_vect):\n full_specs = json.loads(json.dumps(route_vect[0]))\n avg_specs = json.loads(json.dumps(route_vect[0]))\n var_specs = json.loads(json.dumps(route_vect[0]))\n\n # init full specs\n for spec in full_specs:\n for k in spec:\n spec[k] = []\n\n for route in route_vect[1:]:\n for i, spec in enumerate(avg_specs):\n for k in spec:\n # add specs\n avg_specs[i][k] += route[i][k]\n # append specs\n full_specs[i][k].append(route[i][k])\n\n # for each vehicle\n for i, spec in enumerate(avg_specs):\n # for each spec (e.g. load, points, distance)\n for k in spec:\n # compute average\n avg_specs[i][k] /= len(route_vect)\n # compute variance\n # https://stackabuse.com/calculating-variance-and-standard-deviation-in-python/\n var_specs[i][k] = math.sqrt(\n sum([(x - avg_specs[i][k]) ** 2 for x in full_specs[i][k]]) / len(route_vect))\n\n return avg_specs, var_specs, full_specs",
"def genre_average(genre_vectors):\n array = [vector for vector in genre_vectors]\n return np.average(array, axis=0)",
"def calculate_average_precision(class_name='', current_neuron_index=current_neuron_index, acts=acts,\n no_files_in_label=no_files_in_label, verbose=verbose, minx='',Q_stop=''):\n #\n current_neuron = acts.get_activations_for_neuron(current_neuron_index) # get the neuron's data\n x_data = current_neuron.vector # get the activations without classes\n if minx == '':\n minx = min(x_data) # this grabs all the points\n # grab your list of points\n local_list, selected_activations = grab_points_for_a_cluster(current_neuron_index,\n min_selected_x_data=minx,\n max_selected_x_data=max(x_data),\n acts=acts,\n x_data=x_data,\n verbose=verbose)\n if not Q_stop == '':\n Q = Q_stop\n else:\n Q = len(local_list) # total length of list\n # get the test class (this is the correct class or 'A')\n if class_name == '':\n test_class = local_list[-1][0]\n else:\n test_class = class_name\n N_test = no_files_in_label[test_class] # no of items in class A\n # set up counters\n AP = 0 # average precision\n count_of_test_class = 0\n # loop backwards through the list, abs j is the position in a 1-indexed list\n # values for i == -1\n# current_class = local_list[-1][0]\n# if (current_class == test_class):\n# count_of_test_class = count_of_test_class + 1 # we found A\n# precs_x = count_of_test_class /1\n recall_x = 0\n Ave_precs_x = 0\n for i in range(Q):\n j = -(i + 1) # 1 indexed\n recall_x_minus_1 = recall_x\n current_class = local_list[j][0] # current class\n if j == -Q:\n # if the whole of local_list is the same class (this accounts for zero indexing)\n if verbose:\n print(current_class)\n print('{}/{}'.format(count_of_test_class, abs(j)))\n j = j -1 # really this is here so we can check j\n #break\n if count_of_test_class == N_test:\n #we've found them all\n if verbose:\n print('found all {} of {}, stopping...'.format(N_test, current_class))\n print('{}/{}'.format(count_of_test_class, abs(j)))\n break\n if (current_class == test_class):\n count_of_test_class = count_of_test_class + 1 #n A\n precs_x = count_of_test_class /(abs(j)) # N.b. this is the sum, we divide by j on the output\n recall_x = count_of_test_class / N_test\n delta_recall_x = recall_x - recall_x_minus_1 # difference in recall between this point nd the next\n weight_precs_x = precs_x * delta_recall_x # weighted precsion at point x (we do average via weighted sum)\n Ave_precs_x = Ave_precs_x + weight_precs_x # average_precision evaluated at point x\n return Ave_precs_x, precs_x, recall_x",
"def compute_GS(GMtcs):\n\n GS = np.mean(GMtcs,axis=0) #average over voxels\n\n return GS",
"def load_class_avg(self, mrcs, factor):\n \n global shape\n \n projection_2D = {}\n extract_2D = {}\n \n if len(factor) == 0: # Empty entry, set factor 1\n factor = 1\n\n with mrcfile.open(mrcs) as mrc:\n for i, data in enumerate(mrc.data):\n projection_2D[i] = data\n mrc.close()\n \n shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0]\n\n for k, avg in projection_2D.items():\n if factor == 1:\n extract_2D[k] = extract_class_avg(avg)\n else:\n scaled_img = transform.rescale(\n avg, \n scale=(1/float(factor)), \n anti_aliasing=True, \n multichannel=False, # Add to supress warning\n mode='constant' # Add to supress warning\n ) \n extract_2D[k] = extract_class_avg(scaled_img)\n\n return projection_2D, extract_2D",
"def acceleration(data_array, time=1):\n speed = DataOperation.speed(data_array)\n acc_values = np.zeros(speed.size)\n count = 1\n acc_values[0] = 0\n for d in speed[1:]:\n acc_values[count] = (d - speed[count-1])/3.6/time\n count += 1\n return acc_values",
"def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. #else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl"
]
| [
"0.66806364",
"0.5879501",
"0.57972777",
"0.5706217",
"0.546517",
"0.54273754",
"0.5395548",
"0.5379459",
"0.5367989",
"0.5266238",
"0.5126426",
"0.51186705",
"0.5099308",
"0.50899357",
"0.5079416",
"0.5076849",
"0.50666314",
"0.50658333",
"0.50573754",
"0.5054296",
"0.50531715",
"0.50285524",
"0.5020868",
"0.5020029",
"0.49921867",
"0.49899304",
"0.49784842",
"0.49602708",
"0.4934717",
"0.4924326"
]
| 0.84505486 | 0 |
Compute the nodal sum of values defined on elements. | def nodalSum(val,elems,work,avg):
nodes = unique1d(elems)
for i in nodes:
wi = where(elems==i)
vi = val[wi]
if avg:
vi = vi.sum(axis=0)/vi.shape[0]
else:
vi = vi.sum(axis=0)
val[wi] = vi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nodalSum2(val,elems,tol):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n ai,ni = average_close(vi,tol=tol)\n ai /= ni.reshape(ai.shape[0],-1)\n val[wi] = ai",
"def sum_elements(arr):\n return sum(arr)",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.array(np.sum(input_vals[0], node.const_attr))\r\n else:\r\n #print(np.sum(input_vals[0]))\r\n return np.array(np.sum(input_vals[0]))",
"def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)",
"def sum(self) -> float:\n return sum(self.values)",
"def sum(self) -> int:\n return self.root.sum",
"def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_PolyMesh(self.params)",
"def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s",
"def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)",
"def summation(self):\n return sum(self.read_ints())",
"def sum(self):\n return sum(self.values)",
"def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed",
"def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def sum(self):\n return self.vsum",
"def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s",
"def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans",
"def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total",
"def sum (self):\n return self.values.sum ()",
"def sum (self):\n return self.values.sum ()",
"def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])",
"def n(self):\n return sum(list(self.nodes.values()))",
"def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)",
"def sum(self):\n return self.aggregate(np.sum)",
"def sum(values):\n total = 0\n for i in values:\n total += i\n return total",
"def sum_values(values):\n return (sum(values))",
"def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n return [node]",
"def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum",
"def sum_node_list(node_list):\n node_list = [n for n in node_list if n is not None]\n if node_list == []:\n return None\n\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def sum(self):\n return np.dot(self.data.T, self.weights)",
"def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result"
]
| [
"0.72711855",
"0.6993915",
"0.69905835",
"0.6909588",
"0.6885086",
"0.68823624",
"0.6866198",
"0.68248475",
"0.68051624",
"0.6783232",
"0.6779679",
"0.6768338",
"0.67458636",
"0.6648012",
"0.6581813",
"0.65722775",
"0.6525247",
"0.6508257",
"0.6508257",
"0.64953786",
"0.64827645",
"0.64457995",
"0.64445317",
"0.6415264",
"0.63944477",
"0.63762563",
"0.6373129",
"0.63707334",
"0.6346534",
"0.6297832"
]
| 0.80045086 | 0 |
Compute the nodal sum of values defined on elements. | def nodalSum2(val,elems,tol):
nodes = unique1d(elems)
for i in nodes:
wi = where(elems==i)
vi = val[wi]
ai,ni = average_close(vi,tol=tol)
ai /= ni.reshape(ai.shape[0],-1)
val[wi] = ai | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nodalSum(val,elems,work,avg):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n if avg:\n vi = vi.sum(axis=0)/vi.shape[0]\n else:\n vi = vi.sum(axis=0)\n val[wi] = vi",
"def sum_elements(arr):\n return sum(arr)",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.array(np.sum(input_vals[0], node.const_attr))\r\n else:\r\n #print(np.sum(input_vals[0]))\r\n return np.array(np.sum(input_vals[0]))",
"def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)",
"def sum(self) -> float:\n return sum(self.values)",
"def sum(self) -> int:\n return self.root.sum",
"def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_PolyMesh(self.params)",
"def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s",
"def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)",
"def summation(self):\n return sum(self.read_ints())",
"def sum(self):\n return sum(self.values)",
"def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed",
"def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def sum(self):\n return self.vsum",
"def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s",
"def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans",
"def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total",
"def sum (self):\n return self.values.sum ()",
"def sum (self):\n return self.values.sum ()",
"def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])",
"def n(self):\n return sum(list(self.nodes.values()))",
"def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)",
"def sum(self):\n return self.aggregate(np.sum)",
"def sum(values):\n total = 0\n for i in values:\n total += i\n return total",
"def sum_values(values):\n return (sum(values))",
"def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n return [node]",
"def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum",
"def sum_node_list(node_list):\n node_list = [n for n in node_list if n is not None]\n if node_list == []:\n return None\n\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def sum(self):\n return np.dot(self.data.T, self.weights)",
"def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result"
]
| [
"0.80045086",
"0.6993915",
"0.69905835",
"0.6909588",
"0.6885086",
"0.68823624",
"0.6866198",
"0.68248475",
"0.68051624",
"0.6783232",
"0.6779679",
"0.6768338",
"0.67458636",
"0.6648012",
"0.6581813",
"0.65722775",
"0.6525247",
"0.6508257",
"0.6508257",
"0.64953786",
"0.64827645",
"0.64457995",
"0.64445317",
"0.6415264",
"0.63944477",
"0.63762563",
"0.6373129",
"0.63707334",
"0.6346534",
"0.6297832"
]
| 0.72711855 | 1 |
Sets the focal_device_id of this TopologyAttachmentResultDto. | def focal_device_id(self, focal_device_id):
self._focal_device_id = focal_device_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def device_id(self, device_id):\n\n self._device_id = device_id",
"def device_id(self, device_id):\n\n self._device_id = device_id",
"def attachment_file_id(self, attachment_file_id):\n\n self._attachment_file_id = attachment_file_id",
"def set_device(self, device):\n self.device = device",
"def fantasy_alarm_player_id(self, fantasy_alarm_player_id):\n\n self._fantasy_alarm_player_id = fantasy_alarm_player_id",
"def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id",
"def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id",
"def device_mo_id(self, device_mo_id):\n\n self._device_mo_id = device_mo_id",
"def fid(self):\n\n if self._fid is not None:\n return(self._fid)\n\n if self._spec is not None:\n self._fid = np.fft.ifft(np.fft.ifftshift(self._spec))\n return(self._fid)\n\n return(None)",
"def focal_point(self):\n return self._focal_point",
"def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError",
"def device_path(self, value):\n self._device_path = value",
"def device(self, device):\n\n self._device = device",
"def device_id(self, device_id):\n if device_id is None:\n raise ValueError(\"Invalid value for `device_id`, must not be `None`\") # noqa: E501\n\n self._device_id = device_id",
"def device_id(self):\n return self._device_id",
"def supporting_device(self, supporting_device):\n\n self._supporting_device = supporting_device",
"def device_id(self):\n return self._annotations.get(EventData.PROP_DEVICE_ID, None)",
"def attachment_id(self, attachment_id):\n\n self._attachment_id = attachment_id",
"def device_id(self) -> Optional[str]:\n return self.relay(\"device_id\")",
"def _set_device_path(self):\n pass",
"def attachment_upload_id(self, attachment_upload_id):\n\n self._attachment_upload_id = attachment_upload_id",
"def script_set_device(self,udid=None):\n self.desired_caps['udid'] = udid;",
"def __init__(self, authenticator, device_id='DO_NOT_TRACK_THIS_DEVICE'):\n super(DeviceIDAuthorizer, self).__init__(authenticator)\n self._device_id = device_id",
"def file_id(self, file_id):\n if self.local_vars_configuration.client_side_validation and file_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `file_id`, must not be `None`\") # noqa: E501\n\n self._file_id = file_id",
"def file_id(self, file_id):\n if self.local_vars_configuration.client_side_validation and file_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `file_id`, must not be `None`\") # noqa: E501\n\n self._file_id = file_id",
"def device_token(self, device_token):\n \n self._device_token = device_token",
"def setDeviceID(self, id, unitCode=0):\n resp = self.XAPCommand('DID', id, unitCode=unitCode)\n return int(resp)",
"def set_device(num):\n safe_call(backend.get().af_set_device(num))",
"def device(deviceid):\n\n # Torch device\n # pylint: disable=E1101\n return deviceid if isinstance(deviceid, torch.device) else torch.device(Models.reference(deviceid))",
"def flavor_id(self):\n return self._flavor_id"
]
| [
"0.5081288",
"0.5081288",
"0.47622368",
"0.4721567",
"0.46936834",
"0.467642",
"0.467642",
"0.46551454",
"0.4622062",
"0.46029237",
"0.4589448",
"0.45663542",
"0.45312893",
"0.44925702",
"0.4490328",
"0.44683826",
"0.44367197",
"0.43781528",
"0.432919",
"0.43208218",
"0.42943886",
"0.42840827",
"0.42834717",
"0.42781737",
"0.42781737",
"0.42620704",
"0.41931507",
"0.4184878",
"0.41744256",
"0.41523293"
]
| 0.7755509 | 0 |
Sets the link_data of this TopologyAttachmentResultDto. | def link_data(self, link_data):
self._link_data = link_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_linked_data(\n self,\n val=None\n ):\n if val != None:\n self.linked_data = val",
"def link(self, link):\n\n self.container['link'] = link",
"def link(self, link):\n\n self._link = link",
"def link(self, link):\n\n self._link = link",
"def link(self, link):\n\n self._link = link",
"def link(self, link):\n\n self._link = link",
"def link(self, link):\n\n self._link = link",
"def link(self, link):\n\n self._link = link",
"def link(self, link):\n\n self._link = link",
"def update_link_id(self, data):\n\n self.data[data['project_name']]['nodes'][data['first']]['ports'][data['first_port']]['link_id'] = data['link_id']\n self.data[data['project_name']]['nodes'][data['second']]['ports'][data['second_port']]['link_id'] = data['link_id']",
"def link(self, link):\n\n self._set_field(\"link\", link)",
"def update(self, linkData):\n for field, value in linkData.items():\n if field == 'flags':\n setattr(self, field, value)\n elif(value is not None and value.strip() != ''):\n setattr(self, field, value)",
"def create_link(self):\n if self.link_info:\n link_type = self.file.options['link_type']\n if 'node' in self.link_info:\n target_path = self.link_info['node'].full_path\n if link_type == 'string':\n # create string dataset containing link path\n #- self.file.file_pointer.create_dataset(self.full_path, data=\"h5link:/\" + target_path)\n self.file.create_dataset(self.full_path, data=\"h5link:/\" + target_path)\n elif link_type == 'hard':\n # create hard link to target. This implemented by h5py \"Softlink\". Not sure why.\n #- self.file.file_pointer[self.full_path] = h5py.SoftLink(target_path)\n self.file.create_softlink(self.full_path, target_path)\n else: \n raise Exception('Invalid option value for link_type (%s)' % link_type)\n elif 'extlink' in self.link_info:\n file, path = self.link_info['extlink']\n # link to external file\n if link_type == 'string':\n # create string dataset containing link path\n target_path = \"%s,%s\" % (file, path)\n #- self.file.file_pointer.create_dataset(self.full_path, data=\"h5extlink:/\" + target_path)\n self.file.create_dataset(self.full_path, data=\"h5extlink:/\" + target_path)\n elif link_type == 'hard':\n # create link to external file\n #- self.file.file_pointer[self.full_path] = h5py.ExternalLink(file,path)\n self.file.create_external_link(self.full_path, file, path) \n else:\n raise Exception('Invalid option value for link_type (%s)' % link_type)\n else:\n raise SystemError(\"** Error: invalid key in link_info %s\" % self.link_info)",
"def link_id(self, link_id):\n\n self._link_id = link_id",
"def set_data(self, data):\n\n pass",
"def get_link_data(self, linkData):\n if linkData[0] is None:\n project_name = \"This Project*\"\n else:\n project_name = linkData[0].replace(\"\\\\\", \"/\")\n\n designName = linkData[1]\n hfssSolutionName = linkData[2]\n forceSourceSimEnabler = linkData[3]\n preserveSrcResEnabler = linkData[4]\n\n arg = [\n \"NAME:DefnLink\",\n \"Project:=\",\n project_name,\n \"Product:=\",\n \"ElectronicsDesktop\",\n \"Design:=\",\n designName,\n \"Soln:=\",\n hfssSolutionName,\n [\"NAME:Params\"],\n \"ForceSourceToSolve:=\",\n forceSourceSimEnabler,\n \"PreservePartnerSoln:=\",\n preserveSrcResEnabler,\n \"PathRelativeTo:=\",\n \"TargetProject\",\n ]\n\n return arg",
"def data(self, data):\n\n self._data = data",
"def data(self, data):\n\n self._data = data",
"def data(self, data):\n\n self._data = data",
"def data(self, data):\n\n self._data = data",
"def data(self, data):\n\n self._data = data",
"def data(self, data):\n\n self._data = data",
"def entities_links_wikidata(self, entities_links_wikidata):\n\n self._entities_links_wikidata = entities_links_wikidata",
"def data(self, data):\n self.__data = data",
"def set_data(self, data):\n self.data = data",
"def data(self, data):\n self._data = data",
"def data(self, data):\n self._data = data",
"def _updateLinkDataDataset(self, dataset, columns=None):\n if columns is None:\n columns = self.getModelObj().getChildren()\n return model_navigator.iqModelNavigatorManager._updateLinkDataDataset(self, dataset=dataset, columns=columns)",
"def setData(self, data):\n return None",
"def set_data(self, data):\n\n self._data = data"
]
| [
"0.5885388",
"0.5762334",
"0.5706907",
"0.5706907",
"0.5706907",
"0.5706907",
"0.5706907",
"0.5706907",
"0.5706907",
"0.5507466",
"0.5478151",
"0.54773843",
"0.5213641",
"0.518306",
"0.51407033",
"0.513635",
"0.5109374",
"0.5109374",
"0.5109374",
"0.5109374",
"0.5109374",
"0.5109374",
"0.50849533",
"0.50801855",
"0.5078325",
"0.50757545",
"0.50757545",
"0.50743407",
"0.50632906",
"0.50630313"
]
| 0.7365313 | 0 |
Sets the node_data of this TopologyAttachmentResultDto. | def node_data(self, node_data):
self._node_data = node_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_data(self, node_data):\n self.node_data_ = node_data\n self.label = node_data.label\n self.node_type = node_data.node_type\n self.arity = node_data.arity\n self.min_depth = node_data.min_depth\n self.child_type = node_data.child_type\n self.numpy_func = node_data.numpy_func\n self.tensorflow_func = node_data.tensorflow_func",
"def set_data(node, value):\n node['data'] = value",
"def set_node(self, node):\n self.__node = node",
"def set_temp_data(self, data):\n self.data = data",
"def setData(self, data):\n return None",
"def set_in_data(self, data: NodeData, port: Port):\n self.inputs[port.index] = copy(data)\n if self._check_inputs():\n try:\n self.compute()\n except Exception as e:\n traceback.print_exc()\n else:\n self._statusLabel.setText('×')\n for i in self.outputs:\n self.outputs[i] = None\n for i in range(self.num_ports[PortType.output]):\n self.data_updated.emit(i)",
"def node_info(self, node_info):\n\n self._node_info = node_info",
"def set_data(self, data):\n\n pass",
"def set_data(self, data):\n self.data = data",
"def setNodeDatum(self, node, value):\n\n if not cmds.attributeQuery('datum', node=node, exists=True):\n cmds.addAttr(node, longName='cadence_datum', shortName='datum', niceName='Datum')\n\n cmds.setAttr(node + '.datum', value)",
"def __init__(self, data, node):\n self.data = data\n self.node = node # This is the data structure which holds the data for this node, e.g. lat, lon, etc.",
"def set_data(self, data):\n\n self._data = data",
"def set_data(self, data):\n\n self._data = data",
"def set_data(self, data):\n\n self._data = data",
"def set_node(self, index, node):\r\n self.loc.coord[index] = node",
"def set_data(self, data):\n self._set_data(data)",
"def setDestination(self, node):\n self.dest_node = node",
"def setData(self, data):\n self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])",
"def SetData(self, data):\r\n\r\n self._data = data",
"def setData(self, data):\n self._data = data",
"def setData(self,data):\n self.data=data\n self.leaf=True",
"def setData(self, data):\n self.data = data",
"def setData(self, data):\n self.data = data",
"def add_node(self, node_data):\n self.__rtags.append(True)\n self.__nodedata.append(data)\n self.__ltags.append(True)",
"def data(self, data):\n self._data = data",
"def data(self, data):\n self._data = data",
"def node_a(self, node_a):\n\n self._node_a = node_a",
"def set_working_node(self, node):\n self.working_node = node",
"def set_node_provenance(self, node_data: Dict):\n self.set_provenance(\"provided_by\", node_data)",
"def data(self, data):\n self.__data = data"
]
| [
"0.5924504",
"0.56284124",
"0.5487792",
"0.52650934",
"0.522697",
"0.52221644",
"0.5181747",
"0.5177556",
"0.5148225",
"0.5126763",
"0.51123697",
"0.51031524",
"0.51031524",
"0.51031524",
"0.5067002",
"0.5042375",
"0.5041182",
"0.50394917",
"0.50161797",
"0.4974361",
"0.49511215",
"0.4930194",
"0.4930194",
"0.4917654",
"0.49149346",
"0.49149346",
"0.49072412",
"0.4896395",
"0.4890098",
"0.4887708"
]
| 0.6978998 | 0 |
Downloads data from the GDC. Combine the smaller files (~KB range) into a grouped download. The API now supports combining UUID's into one uncompressed tarfile using the ?tarfile url parameter. Combining many smaller files into one download decreases the number of open connections we have to make | def download(parser, args):
successful_count = 0
unsuccessful_count = 0
big_errors = []
small_errors = []
total_download_count = 0
validate_args(parser, args)
# sets do not allow duplicates in a list
ids = set(args.file_ids)
for i in args.manifest:
if not i.get('id'):
log.error('Invalid manifest')
break
ids.add(i['id'])
index_client = GDCIndexClient(args.server)
client = get_client(args, index_client)
# separate the smaller files from the larger files
bigs, smalls = index_client.separate_small_files(ids, args.http_chunk_size)
# the big files will be normal downloads
# the small files will be joined together and tarfiled
if smalls:
log.debug('Downloading smaller files...')
# download small file grouped in an uncompressed tarfile
small_errors, count = client.download_small_groups(smalls)
successful_count += count
i = 0
while i < args.retry_amount and small_errors:
time.sleep(args.wait_time)
log.debug('Retrying failed grouped downloads')
small_errors, count = client.download_small_groups(small_errors)
successful_count += count
i += 1
# client.download_files is located in parcel which calls
# self.parallel_download, which goes back to to gdc-client's parallel_download
if bigs:
log.debug('Downloading big files...')
# create URLs to send to parcel for download
bigs = [ urlparse.urljoin(client.data_uri, b) for b in bigs ]
downloaded_files, big_error_dict = client.download_files(bigs)
not_downloaded_url = ''
big_errors_count = 0
if args.retry_amount > 0:
for url, reason in big_error_dict.iteritems():
# only retry the download if it wasn't a controlled access error
if '403' not in reason:
not_downloaded_url = retry_download(
client,
url,
args.retry_amount,
args.no_auto_retry,
args.wait_time)
else:
big_errors.append(url)
not_downloaded_url = ''
if not_downloaded_url:
for b in big_error_dict:
big_errors.append(url)
if big_errors:
log.debug('Big files not downloaded: {0}'
.format(', '.join([ b.split('/')[-1] for b in big_errors ])))
successful_count += len(bigs) - len(big_errors)
unsuccessful_count = len(ids) - successful_count
msg = 'Successfully downloaded'
log.info('{0}: {1}'.format(
colored(msg, 'green') if not args.color_off else msg,
successful_count))
if unsuccessful_count > 0:
msg = 'Failed downloads'
log.info('{0}: {1}'.format(
colored(msg, 'red') if not args.color_off else msg,
unsuccessful_count))
return small_errors or big_errors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download(size):\n files = glob(f'{size}_chunk/{FILE_BASE}_rdn_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_loc_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_obs_*[!.hdr]')\n\n if len(files) != 3:\n Logger.info('Downloading data')\n\n req = requests.get(URLS[size])\n if req:\n with zipfile.ZipFile(BytesIO(req.content)) as zip:\n zip.extractall()\n else:\n Logger.error(f'Failed to download {size}_chunk data with HTTP error code: {req.status_code}')\n\n # Try again\n files = glob(f'{size}_chunk/{FILE_BASE}_rdn_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_loc_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_obs_*[!.hdr]')\n\n if len(files) != 3:\n Logger.error('Not all input files are found')\n return\n\n return files",
"def download_gaia(dest_path, files):\n for f in files:\n # Get URL and file name\n file_url, file_name = f\n print(file_name)\n file_path = os.path.join(path, file_name)\n # Download data (if not already)\n if (not os.path.exists(file_path) and not os.path.isfile(file_path)):\n print(\"Downloading {}...\".format(file_name))\n response = urllib.request.urlopen(file_url)\n data = response.read()\n tar_gz = open(file_path, 'wb')\n tar_gz.write(data)\n tar_gz.close()\n # Be nice\n sleep(1)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n r = requests.Session().get(DATA_URL)\n with open(filepath, 'wb') as fd:\n for chunk in r.iter_content(500):\n fd.write(chunk)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path",
"def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)",
"def download_data_files(self, dest_directory):\n\t\tif not os.path.exists(dest_directory):\n\t\t\tos.makedirs(dest_directory)\n\t\tfilename = DATA_URL.split('/')[-1]\n\t\tfilepath = os.path.join(dest_directory, filename)\n\t\tif not os.path.exists(filepath):\n\t\t\tdef _progress(count, block_size, total_size):\n\t\t\t\tsys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n\t\t\t\t\t\tfloat(count * block_size) / float(total_size) * 100.0))\n\t\t\t\tsys.stdout.flush()\n\t\t\tfilepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\t\t\tprint()\n\t\t\tstatinfo = os.stat(filepath)\n\t\t\tprint('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\t\textracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n\t\tif not os.path.exists(extracted_dir_path):\n\t\t\ttarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(\n urls,\n output_dir,\n *,\n existing=\"error\",\n jobs=6,\n develop_debug=False,\n authenticate=False, # Seems to work just fine for public stuff\n recursive=True,\n):\n urls = flattened([urls])\n if len(urls) > 1:\n raise NotImplementedError(\"multiple URLs not supported\")\n if not urls:\n # if no paths provided etc, we will download dandiset path\n # we are at, BUT since we are not git -- we do not even know\n # on which instance it exists! Thus ATM we would do nothing but crash\n raise NotImplementedError(\"No URLs were provided. Cannot download anything\")\n url = urls[0]\n girder_server_url, asset_type, asset_id = parse_dandi_url(url)\n\n # We could later try to \"dandi_authenticate\" if run into permission issues.\n # May be it could be not just boolean but the \"id\" to be used?\n client = girder.get_client(\n girder_server_url,\n authenticate=authenticate,\n progressbars=True, # TODO: redo all this\n )\n\n lgr.info(f\"Downloading {asset_type} with id {asset_id} from {girder_server_url}\")\n\n # there might be multiple asset_ids, e.g. if multiple files were selected etc,\n # so we will traverse all of them\n files = flatten(\n _get_asset_files(\n asset_id_, asset_type, output_dir, client, authenticate, existing, recursive\n )\n for asset_id_ in set(flattened([asset_id]))\n )\n\n Parallel(n_jobs=jobs, backend=\"threading\")(\n delayed(client.download_file)(\n file[\"id\"],\n op.join(output_dir, file[\"path\"]),\n existing=existing,\n attrs=file[\"attrs\"],\n # TODO: make it less \"fluid\" to not breed a bug where we stop verifying\n # for e.g. digests move\n digests={\n d: file.get(\"metadata\")[d]\n for d in metadata_digests\n if d in file.get(\"metadata\", {})\n },\n )\n for file in files\n )",
"def download(data_type, gs_aoi, main_dir, local_rep=True):\n # Get URLs for tiles covered by a polygon:\n tiles = get_tile_names(gs_aoi, data_type)\n print(f'Found {len(tiles)} products')\n\n # Make sure temporary folder for download exists:\n dwn_dir = join(main_dir, data_type)\n if not exists(dwn_dir):\n makedirs(dwn_dir)\n\n if local_rep:\n # Copy DTM files from local repository:\n print('\\nCopying DTM files:')\n for num, name in enumerate(tiles):\n print('{} of {}'.format(num+1, len(tiles)))\n dwn_stat, _ = copy_local(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n out_msg = 'Finished copying DTM files!'\n else:\n # Download DTM files:\n print(f\"\\nDownloading {data_type} files:\")\n for num, name in enumerate(tiles):\n print('{} of {}'.format(num+1, len(tiles)))\n dwn_stat, _ = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n if data_type == \"DTM\":\n # Convert to Geotiff\n print(\"Converting to GeoTIFF...\")\n result = asc_to_gtif(dwn_dir)\n print(result)\n out_msg = \"Finished downloading DTM files!\"\n\n # Output dictionary:\n out = {'out_msg': out_msg,\n 'out_dir': dwn_dir}\n\n return out",
"def _download_and_uncompress_dataset(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)",
"def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)",
"def download_and_extract_data(tmp_dir, dataset):\n url = dataset[0]\n print(dataset)\n compressed_filename = os.path.basename(url)\n compressed_file = generator_utils.maybe_download(\n tmp_dir, compressed_filename, url)\n\n for file in dataset[1]:\n tf.logging.info(\"Reading file: %s\" % file)\n filepath = os.path.join(tmp_dir, file)\n\n # Extract from tar if needed.\n if not tf.gfile.Exists(filepath):\n with tarfile.open(compressed_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n\n documents_filename, labels_filename = dataset[1]\n documents_filepath = os.path.join(tmp_dir, documents_filename)\n labels_filepath = os.path.join(tmp_dir, labels_filename)\n return documents_filepath, labels_filepath",
"def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)",
"def _downloadBatch(idBatch, destDir, downloadFullWGS = False) :\n # Download the records\n r = _getRecordBatch(idBatch)\n # Split the downloaded strings into records\n r = r.strip().split(\"\\n//\")\n # print(r)\n assert r[-1] == \"\"\n r = r[0: -1]\n assert len(r) == len(idBatch)\n # Save the records\n for i in range(len(idBatch)) :\n with open(os.path.join(destDir, idBatch[i] + \".gb\"), \"w\") as fo :\n fo.write(r[i].strip())\n fo.write(\"\\n//\\n\")\n WGSline = _recordIsWGS(r[i])\n if WGSline and downloadFullWGS :\n downloadWGS(r[i], destDir)",
"def download(data_type, gs_aoi, main_dir):\n # Get URLs for tiles covered by a polygon:\n # ----------------------------------------\n tiles = get_tile_names(gs_aoi)\n print('Found {} products'.format(len(tiles['tile_names'])))\n\n # Make sure temporary folder for download exists:\n # -----------------------------------------------\n dwn_dir = join(main_dir, data_type)\n if not exists(dwn_dir):\n makedirs(dwn_dir)\n\n # Proceed to download:\n # --------------------\n if data_type == 'DTM':\n # DOWNLOAD DTM FILES & UNZIP:\n # ---------------------------\n print('\\nDownloading DTM files:')\n for num, name in enumerate(tiles['dtm_url']):\n print('{} of {}'.format(num+1, len(tiles['dtm_url'])))\n dwn_stat, file_name = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n extract_zip(join(dwn_dir, file_name))\n # Delete ZIP file after extraction\n remove(join(dwn_dir, file_name))\n \n # Finished downloading:\n # ---------------------\n out_msg = 'Finished downloading DTM files!'\n \n elif data_type == 'LAZ':\n # DOWNLOAD LAZ FILES:\n # -------------------\n print('\\nDownloading LAZ files:')\n for num, name in enumerate(tiles['laz_url']):\n print('{} of {}'.format(num+1, len(tiles['laz_url'])))\n dwn_stat, _ = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n \n # Finished downloading:\n # ---------------------\n out_msg = 'Finished downloading LAZ files!'\n \n else:\n dwn_dir = None\n out_msg = 'Unexpected data_type'\n \n # Output dictionary:\n # ------------------\n out = {'out_msg': out_msg,\n 'out_dir': dwn_dir}\n \n return out",
"def main():\n for i, url in enumerate(opts.thread, start=1):\n opts.archived_md5 = reload_archive()\n thread = DownloadableThread(i, url)\n thread.resolve_path()\n asyncio.run(thread.download(), debug=False)",
"def download(urls: List[str], num_threads: int = 40) -> List[str]:\n\n num_files = len(urls)\n start = perf_counter()\n\n print(\"Starting download of %s files . . .\" % num_files)\n\n results = multiprocess(urls, Downloader, num_threads=num_threads)\n\n dur = perf_counter() - start\n print(\"Completed download of %s files after %.3f seconds.\" % (num_files, dur))\n\n return results",
"def download_dataset(target_dataset, comet):\n data_paths = list(get_data_paths().values())[0]\n data_store = StoreManager(path=data_paths)\n\n logging.info('STARTING tar download')\n comet.log_dataset_info(name=target_dataset, version=None, path=data_paths)\n start = time.time()\n data_store.download_file(target_dataset)\n end = time.time()\n logging.info('DOWNLOAD time taken: ' + str(end - start))\n comet.log_dataset_hash(target_dataset)\n if target_dataset.endswith('.tar.gz'):\n logging.info('STARTING untarring')\n tf = tarfile.open(target_dataset)\n tf.extractall()\n logging.info('COMPLETING untarring')",
"def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break",
"def get(self):\n # calculate the total size\n file_size = self.num_chunk * self.chunk_size\n\n if self.verbose:\n print 'Estimated download size: %d' % file_size\n\n # iterate through entire calculated file size with the specified chunk\n # size, create new threads to process the download in parallel\n for location in range(0, file_size, self.chunk_size):\n count = len(self.threads)\n\n if self.verbose:\n print 'Creating download thread %d' % count\n\n # create thread arguments and new thread with function to target\n # for processing and being processing\n thread_args = (location, count)\n thread = threading.Thread(target=self._download, args=thread_args)\n self.threads.append(thread)\n thread.start()\n\n # wait until all active threads are complete\n while threading.active_count() > 1:\n time.sleep(0.1)\n\n # create final file path that all downloads will merge into\n filepath = os.path.join(os.getcwd(), self.filename)\n\n if self.verbose:\n print 'Downloads complete, file merging at %s' % filepath\n\n # iterate through all temp files and write to final file\n with open(filepath, 'wb') as open_file:\n for i in range(self.num_chunk):\n\n # recreate the temporary file path to get chunk from\n temp_name = self.filename + '_part_%d' % i\n temp_path = os.path.join(os.getcwd(), temp_name)\n\n # check if temp file exists before trying to write it\n if not os.path.isfile(temp_path):\n continue\n\n # copy the temporary file into the final files\n # delete the temporary file once completed\n shutil.copyfileobj(open(temp_path, 'rb'), open_file)\n os.remove(temp_path)\n\n # if no file was written then remove\n if os.path.getsize(filepath) < 1:\n os.remove(filepath)\n\n if self.verbose:\n print 'No data to write to file for %s' % self.filename\n\n if self.verbose:\n print 'Files merged and deleted. File saved at %s' % filepath",
"def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download_dataset(dataset):\n\n if dataset not in URLS:\n print(f\"unknown dataset {dataset}\")\n sys.exit(0)\n\n filename = f'{dataset}.tar.gz'\n url = URLS[dataset]\n\n if not os.path.exists(filename):\n print(f'downloading dataset \"{dataset}\"')\n os.system(f'curl \"{url}\" -o {filename}')\n else:\n print(f'zipfile \"{filename}\" already exists, remove it if you want to re-download.')\n\n if not os.path.exists(dataset):\n print(f'extracting \"{filename}\"')\n os.system(f'tar -xvf {filename}')\n else:\n print(f'folder \"{dataset}\" already exists, remove it if you want to re-create.')\n\n image_chips = f'{dataset}/image-chips'\n label_chips = f'{dataset}/label-chips'\n if not os.path.exists(image_chips) and not os.path.exists(label_chips):\n print(\"creating chips\")\n libs.images2chips.run(dataset)\n else:\n print(f'chip folders \"{image_chips}\" and \"{label_chips}\" already exist, remove them to recreate chips.')",
"def download_files(self):",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def kegg_download_manager_synchronous(list_of_ids, wait=1):\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n num_urls = len(urls)\n print(f\"Total urls to download: {num_urls}. Progress will be shown below.\")\n results = []\n for url in tqdm(urls):\n results.append(download_synchronous(url))\n time.sleep(wait)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]",
"def download(self) -> None:\n os.makedirs(self.root, exist_ok=True)\n\n for subset in self.subsets:\n if self._check_subset_integrity(subset):\n print(f\"{subset} already downloaded and verified\")\n continue\n path = os.path.join(self.root, subset + \".tar.gz\")\n\n already_present = os.path.isfile(path)\n if not already_present:\n subset_url = self.openslr_url + subset + \".tar.gz\"\n with requests.get(subset_url, stream=True) as r:\n r.raise_for_status()\n with open(path, \"wb\") as f:\n shutil.copyfileobj(r.raw, f)\n\n archive_md5 = self.data_files[subset][\"archive_md5\"]\n if utils.checksum_file(path, \"md5\") != archive_md5:\n raise utils.DownloadError(f\"invalid checksum for {path}\")\n\n with tarfile.open(path, mode=\"r|gz\") as tar:\n tar.extractall(self.root)\n\n if not already_present:\n os.remove(path)",
"def download_all(): #@save\n for name in DATA_HUB:\n download(name)",
"def download_many(archivos:[(\"url\",\"nombre\")], carpeta:str=PATH, *, ignore_error:bool=True, _gui:bool=False, **tqdm_karg):",
"def download_data(files: page_iterator.HTTPIterator, folder: str) -> None:\n logging.info('File download Started... Wait for the job to complete.')\n\n # create folder locally if not exists\n if not os.path.exists(folder): os.makedirs(folder)\n\n for file in files:\n logging.info('GCS File: {}'.format(file.name))\n destination_uri = '{}/{}'.format(folder, file.name.split('/')[-1])\n file.download_to_filename(destination_uri if destination_uri.endswith('.csv') else destination_uri + '.csv')\n logging.info('Exported {} to {}'.format(file.name, destination_uri))\n\n return None",
"def download_fastq():\n\n mkdir(FASTQ_DIR)\n\n template = \"\"\"fastq-dump --split-files --gzip {}\"\"\"\n\n printp(\"\"\"\\n#\\n# download all the fastq files\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label fastq-download\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(FASTQ_DIR))\n\n for library, info in DATA.items():\n printp(template.format(get_srr(library)))\n printp(template.format(get_input_control_srr(library)))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")",
"def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()"
]
| [
"0.63961786",
"0.6266496",
"0.6059152",
"0.6036518",
"0.60253733",
"0.59880096",
"0.5981087",
"0.59605277",
"0.5922241",
"0.58972925",
"0.58775",
"0.5858991",
"0.58549273",
"0.58432364",
"0.5839691",
"0.5825071",
"0.5797404",
"0.5794432",
"0.5772822",
"0.57607055",
"0.57580495",
"0.57534003",
"0.5747673",
"0.5712063",
"0.5702502",
"0.56985456",
"0.5697729",
"0.56887454",
"0.56867623",
"0.56552094"
]
| 0.7188963 | 0 |
The percentage of rate limit. | def rate_limit_percentage(self) -> Optional[float]:
return pulumi.get(self, "rate_limit_percentage") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pct(self):\n\t\treturn self.bottle.pct()",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )",
"def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0",
"def PercentMaxRate(self):\n\t\treturn self._get_attribute('percentMaxRate')",
"def percent(self):\r\n return self._percent",
"def get_percent(self):\n return self.percent",
"def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None",
"def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")",
"def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress",
"def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress",
"def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def bytes_limit_per_file_percent(self) -> Optional[float]:\n return pulumi.get(self, \"bytes_limit_per_file_percent\")",
"def files_limit_percent(self) -> Optional[float]:\n return pulumi.get(self, \"files_limit_percent\")",
"def get_opinion_percent(self):\n return (self.get_percent()+100)/2",
"def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"",
"def percent_busy(self):\n return self._percent_busy",
"def progress_rate (self):\n raise NotImplementedError('Subclass must implement this method')",
"def percent_b(self) -> float:\n return self._percent_b",
"def rate(self):\n return self.brate / FAC",
"def percentage_limiter(percentage: float):\n if percentage < 0:\n return 0\n elif 0 <= percentage <= 1:\n return percentage\n else:\n return 1",
"def get_estimated_percentage(self):\n now_id = now_as_id()\n message_id = self.last_message_id\n if message_id >= now_id:\n return 100.0\n \n channel_id = self.source_channel.id\n if channel_id >= message_id:\n return 0.0\n \n if self.is_polling_done():\n return 100.0\n \n return (1.0 - (now_id - message_id) / (now_id - channel_id)) * 100.0",
"def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0",
"def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps",
"def max_percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"max_percentage\")",
"def get_acceptance(self):\n return self.count_accepted / self.count_proposed"
]
| [
"0.77211916",
"0.7705641",
"0.7513694",
"0.7500773",
"0.7500773",
"0.74677795",
"0.7410109",
"0.7388711",
"0.73542374",
"0.73428303",
"0.7216033",
"0.72125334",
"0.7185409",
"0.7185409",
"0.7178804",
"0.71685594",
"0.7082458",
"0.70084995",
"0.69748604",
"0.69652313",
"0.69372606",
"0.68513393",
"0.6850427",
"0.68489933",
"0.6823905",
"0.67920583",
"0.6754221",
"0.6726722",
"0.67078686",
"0.67000306"
]
| 0.8822983 | 0 |
Gets the capabilities of the cognitive services account. Each item indicates the capability of a specific feature. The values are readonly and for reference only. | def capabilities(self) -> Sequence['outputs.SkuCapabilityResponse']:
return pulumi.get(self, "capabilities") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def capabilities(self):\n\n class Capabilities(ct.Structure):\n _fields_ = [(\"Size\", ct.c_ulong),\n (\"AcqModes\", ct.c_ulong),\n (\"ReadModes\", ct.c_ulong),\n (\"FTReadModes\", ct.c_ulong),\n (\"TriggerModes\", ct.c_ulong),\n (\"CameraType\", ct.c_ulong),\n (\"PixelModes\", ct.c_ulong),\n (\"SetFunctions\", ct.c_ulong),\n (\"GetFunctions\", ct.c_ulong),\n (\"Features\", ct.c_ulong),\n (\"PCICard\", ct.c_ulong),\n (\"EMGainCapability\", ct.c_ulong)]\n\n stru = Capabilities()\n stru.Size = ct.sizeof(stru)\n self.lib.GetCapabilities(ct.pointer(stru))\n\n return stru",
"def capabilities(self) -> dto.Capabilities:\n raise NotImplementedError",
"def get_capabilities(self):\n\n service = self.__get_service()\n capability = self.__get_capability()\n contents = {\"service\" : service, \"capability\" : capability}\n return contents, self.params['format']",
"def capability(self):\n code, data, capabilities = (\n self.__send_command(\"CAPABILITY\", withcontent=True))\n if code == \"OK\":\n return capabilities\n return None",
"def get_caps(self):\n return ObjectCapabilities.get_capabilities(self)",
"def capabilities(self):\n return []",
"def capabilities(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"capabilities\")",
"def get_capabilities(self):\n return Capabilities(javabridge.call(self.jobject, \"getCapabilities\", \"()Lweka/core/Capabilities;\"))",
"def capabilities(self):\n pass",
"def __get_capability(self):\n requests = self.__get_capability_request()\n exception = self.__get_capability_exception()\n layers = self.__get_capability_layer()\n \n capability = { \"requests\": requests,\n \"exception\" : exception,\n \"layers\" : layers}\n return capability",
"def get_capabilities(self, method='get'):\n self.client.getcapabilities()\n\n self._has_capabilities = True",
"def getcapabilities(self):\n reader = WFSCapabilitiesReader(self.version, auth=self.auth)\n return openURL(\n reader.capabilities_url(self.url), timeout=self.timeout,\n headers=self.headers, auth=self.auth\n )",
"def capabilities(self):\n return Capabilities(\n immutable = True,\n deferred = False,\n persistent = True,\n appendable = False,\n remote=True,\n )",
"def capabilities(self):\n return Capabilities(\n immutable = True,\n deferred = False,\n persistent = True,\n appendable = False,\n remote=True,\n )",
"def get_capabilities(params,defaults):\n cap = CapabilitiesController (params,defaults)\n return cap.get_capabilities()",
"def control_capabilities(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"control_capabilities\"), kwargs)",
"def list_caps():\n global _CAPABILITIES_MAP\n\n try:\n return tuple(sorted(_CAPABILITIES_MAP.keys()))\n\n except NameError:\n pass # We can remedy this.\n\n loop = get_loop()\n\n controller_connection = CioRoot(loop)\n\n _CAPABILITIES_MAP = {}\n\n for capability_id in controller_connection.init():\n _CAPABILITIES_MAP[capability_id] = {\n 'acquire': controller_connection.acquire,\n 'release': controller_connection.release,\n }\n\n return tuple(sorted(_CAPABILITIES_MAP.keys()))",
"def capabilities(self):\n return None",
"def supported_capabilities(self) -> Optional['outputs.SupportedCapabilitiesResponse']:\n return pulumi.get(self, \"supported_capabilities\")",
"def get_capabilities(self):\n return json.dumps({'network_api': 'pyez'})",
"def capabilities(self):\n return self._dll.JLINKARM_GetEmuCaps()",
"def detect_supported_caps():\n result = []\n # generate list of supported capabilities\n\n # Intel RDT L3 CAT\n if common.PQOS_API.is_l3_cat_supported():\n result.append(common.CAT_L3_CAP)\n\n # Intel RDT L2 CAT\n if common.PQOS_API.is_l2_cat_supported():\n result.append(common.CAT_L2_CAP)\n\n # Intel RDT MBA\n if common.PQOS_API.is_mba_supported():\n result.append(common.MBA_CAP)\n\n if sstbf.is_sstbf_enabled():\n result.append(common.SSTBF_CAP)\n\n if power.is_sstcp_enabled():\n result.append(common.POWER_CAP)\n\n return result",
"def extended_capabilities(self):\n buf = (ctypes.c_uint8 * 32)()\n self._dll.JLINKARM_GetEmuCapsEx(buf, 32)\n return list(buf)",
"def capability_tokens(self):\n return tuple(self._capabilities.keys())",
"def default_capabilities(self):\n return CAPABILITIES",
"def get_quantization_capability(self):\n return self.cur_config['capabilities']",
"def to_capabilities(self):",
"def capability(cls):\n return relationship.many_to_one(cls, 'capability')",
"def caps(self):\n return self._caps",
"def get_capabilities(self, zone_id):\n return self.tado.getCapabilities(zone_id)"
]
| [
"0.70851606",
"0.7014582",
"0.6949531",
"0.68428165",
"0.6804486",
"0.6793715",
"0.67854387",
"0.6769006",
"0.66325563",
"0.66314065",
"0.6474451",
"0.64578134",
"0.6382425",
"0.6382425",
"0.6375141",
"0.63444567",
"0.6304493",
"0.6292689",
"0.6281157",
"0.62223816",
"0.6176099",
"0.6167443",
"0.610909",
"0.61030054",
"0.6097311",
"0.60119545",
"0.5926014",
"0.59249747",
"0.58934605",
"0.5875863"
]
| 0.7209578 | 0 |
The commitment plan associations of Cognitive Services account. | def commitment_plan_associations(self) -> Sequence['outputs.CommitmentPlanAssociationResponse']:
return pulumi.get(self, "commitment_plan_associations") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def crm_associations(self):\n from hubspot3.crm_associations import CRMAssociationsClient\n\n return CRMAssociationsClient(**self.auth, **self.options)",
"def plans(self):\r\n return pl.Plans(self)",
"def get(self):\n return {\n \"plans\": PLANS,\n }",
"def plans():",
"def get_plans(self):\n return stripe.Plan.all()",
"def getPlan(self):\n return StripePlan(self.base.get(\"plan\", []))",
"def plan(self):\n return [(0, 0), (1, 0), (1, 1), (1, 2), (1, 3)]",
"def plans(self):\r\n return Plans(self)",
"def conections(text):\n limit = 6\n request_conections = get_conections(text, limit)\n request_user = id(text)\n angle = 360 / limit\n angle2 = 30\n principal = {}\n secondary = {}\n data = {}\n create_dict(principal, request_user, \"self\", angle)\n\n i = 1\n for conection in request_conections:\n create_dict(principal, conection, request_user, angle, i)\n i += 1\n conections_conection = get_conections(conection[\"person\"][\"publicId\"], limit - 3)\n j = 1\n for conect in conections_conection:\n create_dict(secondary, conect, conection, angle2, j)\n j += 1\n return json.dumps([principal, secondary])",
"def getAcqPoints(self):\r\n\t\treturn self.acqPoints",
"def assigned_plans(self):\n if \"assignedPlans\" in self._prop_dict:\n return AssignedPlansCollectionPage(self._prop_dict[\"assignedPlans\"])\n else:\n return None",
"def assigned_plans(self):\n if \"assignedPlans\" in self._prop_dict:\n return AssignedPlansCollectionPage(self._prop_dict[\"assignedPlans\"])\n else:\n return None",
"def assigned_plans(self):\n if \"assignedPlans\" in self._prop_dict:\n return AssignedPlansCollectionPage(self._prop_dict[\"assignedPlans\"])\n else:\n return None",
"def accounts(self):\r\n return acc.Accounts(self)",
"def list(cls):\n return cls().requests.get('plan')",
"def get_contracts(self):\n return self.contracts",
"def fetch_plans() -> dict[str, str]:\n plans = mdb.account.plan.find(\n {\"stripe_id\": {\"$exists\": 1}}, {\"_id\": 0, \"stripe_id\": 1, \"name\": 1}\n )\n return {p[\"name\"]: p[\"stripe_id\"] for p in plans}",
"def JointAccount(self):\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts(): \n if contact.AdditionalInfo().RegGeneralPartner(): \n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \"<%s> is not a General Partner. Hence JointAccount is None\"%self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts",
"def get_pl_balances(self):\n\n\t\tdimension_fields = ['t1.cost_center']\n\n\t\tself.accounting_dimensions = get_accounting_dimensions()\n\t\tfor dimension in self.accounting_dimensions:\n\t\t\tdimension_fields.append('t1.{0}'.format(dimension))\n\n\t\treturn frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.account, t2.account_currency, {dimension_fields},\n\t\t\t\tsum(t1.debit_in_account_currency) - sum(t1.credit_in_account_currency) as bal_in_account_currency,\n\t\t\t\tsum(t1.debit) - sum(t1.credit) as bal_in_company_currency\n\t\t\tfrom `tabGL Entry` t1, `tabAccount` t2\n\t\t\twhere t1.is_cancelled = 0 and t1.account = t2.name and t2.report_type = 'Profit and Loss'\n\t\t\tand t2.docstatus < 2 and t2.company = %s\n\t\t\tand t1.posting_date between %s and %s\n\t\t\tgroup by t1.account, {dimension_fields}\n\t\t\"\"\".format(dimension_fields = ', '.join(dimension_fields)), (self.company, self.get(\"year_start_date\"), self.posting_date), as_dict=1)",
"def principalCollections(self):\n return ()",
"def organizations(self):\n self.elements('organizations')",
"def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)",
"def commitment_plan_id(self) -> Optional[str]:\n return pulumi.get(self, \"commitment_plan_id\")",
"def _build_account_map(self):\n account_map = {}\n for profile in self._session.available_profiles:\n self._session.profile = profile\n config = self._session.get_scoped_config()\n account_id = config.get('account_id')\n if account_id:\n account_map[account_id] = profile\n return account_map",
"def accounts(self):\r\n return resources.Accounts(self)",
"def get_plan(self):\n sub = self.get_subscription()\n return sub.plan",
"def network_acls(self) -> Optional['outputs.DataCollectionEndpointResponseNetworkAcls']:\n return pulumi.get(self, \"network_acls\")",
"def _group_by_bank(self):\n rslt = {}\n for company in self:\n if not company.indexa_currency_provider:\n continue\n\n if rslt.get(company.indexa_currency_provider):\n rslt[company.indexa_currency_provider] += company\n else:\n rslt[company.indexa_currency_provider] = company\n return rslt",
"def eligible_authorizations(self) -> Optional[Sequence['outputs.EligibleAuthorizationResponse']]:\n return pulumi.get(self, \"eligible_authorizations\")"
]
| [
"0.55934",
"0.5511926",
"0.53579974",
"0.53434294",
"0.51984334",
"0.51952285",
"0.5044473",
"0.4975472",
"0.49590665",
"0.49435046",
"0.4921552",
"0.49170002",
"0.49170002",
"0.49170002",
"0.4875417",
"0.4866419",
"0.48434013",
"0.48164648",
"0.48160282",
"0.47532672",
"0.4740004",
"0.47231185",
"0.47136238",
"0.470688",
"0.46993318",
"0.46977982",
"0.46977687",
"0.4684117",
"0.4665377",
"0.46611902"
]
| 0.7754084 | 0 |
The deletion date, only available for deleted account. | def deletion_date(self) -> str:
return pulumi.get(self, "deletion_date") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleted_time(self) -> str:\n return pulumi.get(self, \"deleted_time\")",
"def decommission_date(self):\n return self._decommission_date",
"def delete_time(self) -> str:\n return pulumi.get(self, \"delete_time\")",
"def delete_time(self) -> str:\n return pulumi.get(self, \"delete_time\")",
"def scheduled_purge_date(self) -> str:\n return pulumi.get(self, \"scheduled_purge_date\")",
"def dep_date(self):\n return self._dep_date",
"def is_deleted(self):\n return 'deleted_at' in self._dict and bool(self._dict['deleted_at'])",
"def get_account_created_date(self):\n return self.account_created_date",
"def planned_purge_date(self):\n return self._planned_purge_date",
"def planned_purge_date(self):\n return self._planned_purge_date",
"def validate_not_deleted(self, key, value):\n assert self.deleted_date is None\n return value",
"def destroy_time(self) -> str:\n return pulumi.get(self, \"destroy_time\")",
"def scheduled_deletion_time(self) -> Optional[datetime.datetime]:\n if not self.temporary:\n return None\n\n delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(\n self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get(\"default\")\n )\n return self.created + delta",
"def token_expiry_date(self):\n return self.__token_expiry_date",
"def date_created(self) -> str:\n return pulumi.get(self, \"date_created\")",
"def date_created(self):\n return self._date_created",
"def date_created(self):\n return self._date_created",
"def date_created(self):\n return self._date_created",
"def created_date(self):\n return self._created_date",
"def created_date(self):\n return self._created_date",
"def get_expiration_date(self):\n return self.expiration_date",
"def expiration_date(self) -> str:\n return pulumi.get(self, \"expiration_date\")",
"def rt_dep_date(self):\n return self._rt_dep_date",
"def date(self):\n return self.status.created_at",
"def Besuchsende(self):\n return self.getEnddate()",
"def to_be_deleted(self):\n return self.filter(start__lte=timezone.now() - datetime.timedelta(days=1))",
"def date(self):\n return self._date",
"def date(self):\n return self._date",
"def date(self):\n return self._date",
"def date(self):\n return self._date"
]
| [
"0.75667304",
"0.7024116",
"0.6854214",
"0.6854214",
"0.6547442",
"0.6326043",
"0.6143451",
"0.6114624",
"0.6068728",
"0.6068728",
"0.5996998",
"0.5946708",
"0.59344095",
"0.58662504",
"0.586075",
"0.58573246",
"0.58573246",
"0.58573246",
"0.5817896",
"0.5817896",
"0.5806128",
"0.5795721",
"0.57667947",
"0.57652277",
"0.5759801",
"0.5709258",
"0.57064486",
"0.56586653",
"0.56586653",
"0.56586653"
]
| 0.90385526 | 0 |
The private endpoint connection associated with the Cognitive Services account. | def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:
return pulumi.get(self, "private_endpoint_connections") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def private_link_endpoint(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"private_link_endpoint\")",
"def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:\n return pulumi.get(self, \"private_endpoint\")",
"def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:\n return pulumi.get(self, \"private_endpoint\")",
"def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointArgs']]:\n return pulumi.get(self, \"private_endpoint\")",
"def connected_endpoint(self):\n try:\n if self._connected_interface:\n return self._connected_interface\n except ObjectDoesNotExist:\n pass\n try:\n if self._connected_circuittermination:\n return self._connected_circuittermination\n except ObjectDoesNotExist:\n pass\n return None",
"def get_private_endpoint_connection(\n self,\n resource_group_name, # type: str\n name, # type: str\n private_endpoint_connection_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"_models.RemotePrivateEndpointConnectionARMResource\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.RemotePrivateEndpointConnectionARMResource\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-01-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_private_endpoint_connection.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+[^\\.]$'),\n 'name': self._serialize.url(\"name\", name, 'str'),\n 'privateEndpointConnectionName': self._serialize.url(\"private_endpoint_connection_name\", private_endpoint_connection_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('RemotePrivateEndpointConnectionARMResource', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def private_link_endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_link_endpoint\")",
"def private_link_endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_link_endpoint\")",
"def __get_endpoint(self):\n return self._endpoint",
"def get_endpoint_connection_info(coriolis, barbican, endpoint):\n endpoint_conn_info = coriolis.endpoints.get(endpoint).to_dict().get(\n 'connection_info')\n\n if 'secret_ref' not in endpoint_conn_info:\n # this endpoint is not using Barbican for secret storage:\n return endpoint_conn_info\n\n secret = barbican.secrets.get(endpoint_conn_info['secret_ref'])\n\n return json.loads(secret.payload)",
"def get_endpoint(self):\r\n return self._endpoint",
"def connection(self):\n ctx = stack.top\n if ctx is not None:\n if not hasattr(ctx, 'simple_connection'):\n ctx.simple_connection = connect_to_region(\n self.app.config['AWS_REGION'],\n aws_access_key_id = self.app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key = self.app.config['AWS_SECRET_ACCESS_KEY'],\n )\n\n return ctx.simple_connection",
"def boto_connection(self):\n import boto.ec2\n region = boto.ec2.get_region(self._availability_zone())\n ec2_access_id = self.access_id()\n ec2_secret_key = self.secret_key()\n return region.connect(aws_access_key_id=ec2_access_id, aws_secret_access_key=ec2_secret_key)",
"def _get_connection(rse, endpoint):\n\n key = \"connection:%s_%s\" % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Creating connection object\")\n result = None\n credentials = _get_credentials(rse, endpoint)\n if 'access_key' in credentials and credentials['access_key'] and \\\n 'secret_key' in credentials and credentials['secret_key'] and \\\n 'is_secure' in credentials and credentials['is_secure'] is not None:\n\n parsed = urlparse.urlparse(endpoint)\n hostname = parsed.netloc.partition(':')[0]\n port = parsed.netloc.partition(':')[2]\n\n result = boto.connect_s3(aws_access_key_id=credentials['access_key'],\n aws_secret_access_key=credentials['secret_key'],\n host=hostname,\n port=int(port),\n is_secure=credentials['is_secure'],\n calling_format=boto.s3.connection.OrdinaryCallingFormat())\n\n REGION.set(key, result)\n logging.debug(\"Created connection object\")\n else:\n raise exception.CannotAuthenticate(\"Either access_key, secret_key or is_secure is not defined for RSE %s endpoint %s\" % (rse, endpoint))\n except exception.RucioException as e:\n raise e\n except:\n raise exception.RucioException(\"Failed to get connection for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result",
"def connected_endpoint(self):\n try:\n if self._connected_poweroutlet:\n return self._connected_poweroutlet\n except ObjectDoesNotExist:\n pass\n try:\n if self._connected_powerfeed:\n return self._connected_powerfeed\n except ObjectDoesNotExist:\n pass\n return None",
"def endpoint(self):\r\n return self._endpoint",
"def private_endpoint_subnetwork(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_endpoint_subnetwork\")",
"def endpoint(self):\n return self.Endpoint",
"def get_aws_ips_connection(self):\n return self.m_connection.aws_ips",
"async def connections_endpoints(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n connection_id = request.match_info[\"conn_id\"]\n\n profile = context.profile\n connection_mgr = ConnectionManager(profile)\n try:\n endpoints = await connection_mgr.get_endpoints(connection_id)\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n except (BaseModelError, StorageError, WalletError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n return web.json_response(dict(zip((\"my_endpoint\", \"their_endpoint\"), endpoints)))",
"def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def ec2_conn ( self ) :\n if not self.ec2 :\n ec2_region = boto.ec2.get_region( self.aws_region_name )\n self.ec2 = boto.ec2.connection.EC2Connection( aws_access_key_id = self.access_key,\n aws_secret_access_key = self.access_key_secret,\n region = ec2_region )\n return self.ec2",
"def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def get_connect(self):\n\t\treturn self.connect",
"def get_conn(self):\n self.conn = self.get_client_type('sns')\n return self.conn",
"def iamConn():\n cache = _conn_cache\n if not cache:\n cache.append(boto.iam.connect_to_region(getArgs().region))\n return cache[0]",
"def endpoint(self):\n return f'Endpoint = {self._peer.endpoint}'",
"def connection(self):\n return self.get_connection()",
"def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]:\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def hybrid_connection_endpoint_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"hybrid_connection_endpoint_id\")"
]
| [
"0.6600078",
"0.6578945",
"0.6578945",
"0.65711725",
"0.64803904",
"0.63739693",
"0.6331475",
"0.6331475",
"0.6241816",
"0.62138796",
"0.6195896",
"0.6108234",
"0.6033329",
"0.6002076",
"0.59658813",
"0.5883271",
"0.58768934",
"0.5868595",
"0.5758015",
"0.5714617",
"0.5701596",
"0.5686717",
"0.56807065",
"0.5670432",
"0.56612706",
"0.5650273",
"0.5643147",
"0.5638858",
"0.55808884",
"0.5567679"
]
| 0.67986786 | 0 |
The scheduled purge date, only available for deleted account. | def scheduled_purge_date(self) -> str:
return pulumi.get(self, "scheduled_purge_date") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def planned_purge_date(self):\n return self._planned_purge_date",
"def planned_purge_date(self):\n return self._planned_purge_date",
"def deletion_date(self) -> str:\n return pulumi.get(self, \"deletion_date\")",
"def scheduled_deletion_time(self) -> Optional[datetime.datetime]:\n if not self.temporary:\n return None\n\n delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(\n self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get(\"default\")\n )\n return self.created + delta",
"def scheduled_reset_at(self):\n return self._scheduled_reset_at",
"def get_due_date(self):\n return self.created_at + self.urgency_level.duration",
"def decommission_date(self):\n return self._decommission_date",
"def planned_purge_date(self, planned_purge_date):\n\n self._planned_purge_date = planned_purge_date",
"def planned_purge_date(self, planned_purge_date):\n\n self._planned_purge_date = planned_purge_date",
"def scheduled(self):\n return str(self.day) if self.day else '-'",
"def deleted_time(self) -> str:\n return pulumi.get(self, \"deleted_time\")",
"def get_retention_time(self):\n return self.now - datetime.timedelta(days=int(RETENTION_DAYS)) + datetime.timedelta(seconds=int(10))",
"def scheduled_reset_period(self):\n return self._scheduled_reset_period",
"async def date(self) -> dt.date:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).date()",
"def ship_date(self):\n return self.created.date()",
"def scheduled_deletion_warning(self) -> Optional[datetime.datetime]:\n time = self.scheduled_deletion_time\n return time - Project.TEMPORARY_PROJECT_WARNING if time else None",
"def delete_time(self) -> str:\n return pulumi.get(self, \"delete_time\")",
"def delete_time(self) -> str:\n return pulumi.get(self, \"delete_time\")",
"def Besuchsende(self):\n return self.getEnddate()",
"def rt_dep_date(self):\n return self._rt_dep_date",
"def get_expiration_date(self):\n return self.expiration_date",
"def delete_scheduled_events():\n curr_date = date.today()\n\n scheduled_events_all = ScheduledEvent.objects.all()\n\n for event in scheduled_events_all:\n if (curr_date - event.event_date).days > 0:\n event.delete()",
"def run_date(self) -> datetime.date:\n return self.timestamp.date()",
"def due_date(self) -> date:\n return self._due_date",
"def date_registered(self) -> datetime:\n return datetime.utcfromtimestamp(self.registered)",
"def expiration_date(self) -> str:\n return pulumi.get(self, \"expiration_date\")",
"def get_account_created_date(self):\n return self.account_created_date",
"def created_at(self):\n return self._backup_started",
"def current_datetime(self):\n return DateAccessor().today()",
"def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)"
]
| [
"0.77084357",
"0.77084357",
"0.6675982",
"0.59855175",
"0.597911",
"0.5939071",
"0.5925011",
"0.5922038",
"0.5922038",
"0.57880515",
"0.578128",
"0.5759633",
"0.56891406",
"0.5666457",
"0.5608823",
"0.5578217",
"0.55437165",
"0.55437165",
"0.55357945",
"0.53633654",
"0.532083",
"0.5274579",
"0.5253672",
"0.5233534",
"0.5223557",
"0.5210216",
"0.51874703",
"0.5180585",
"0.51736444",
"0.51710665"
]
| 0.89512056 | 0 |
Sku change info of account. | def sku_change_info(self) -> 'outputs.SkuChangeInfoResponse':
return pulumi.get(self, "sku_change_info") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sku(self, sku):\n\n self._sku = sku",
"def __init__(__self__, *,\n name: pulumi.Input['SkuName']):\n pulumi.set(__self__, \"name\", name)",
"def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()",
"def put(self, sku, page=None):\n put_data = api_parser.parse_args()\n product = Product.query.filter(Product.sku == put_data['sku']).first_or_404()\n product.name = put_data['name']\n product.description = put_data.get('description')\n product.is_active = put_data.get('is_active')\n db.session.add(product)\n db.session.commit()\n\n return marshal(product, product_fields), 200",
"def changeRoleInfo(self, role, info):",
"def put(self):\n request_data = request.get_json()\n plan = request_data[\"plan\"]\n\n user = get_authenticated_user()\n if not user.stripe_id:\n raise InvalidRequest()\n\n price = get_price(plan, False)\n if not price:\n abort(404, message=\"Plan not found\")\n\n return change_subscription(user, price)",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def put_account(self, account):\n \n pass",
"def test_put_small_and_light_enrollment_by_seller_sku(self):\n pass",
"def setSkillInfo(self, name, information):\r\n skills[name].info = information",
"def __init__(__self__, *,\n name: pulumi.Input[Union[str, 'SkuName']]):\n pulumi.set(__self__, \"name\", name)",
"def update(self, account):\n model = models.load('Account', account)\n return self.client.update_account(model=model)",
"def onAccountUpdate(self, data):\n pass",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_account_data(self):\n self.ensure_one()\n getattr(self, '%s_update_account_data' % self.provider, lambda: None)()",
"def put(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data[\"plan\"]\n\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n raise InvalidRequest()\n\n price = get_price(plan, True)\n if not price:\n abort(404, message=\"Plan not found\")\n\n return change_subscription(organization, price)\n\n raise Unauthorized()",
"def _setaccount_with_institution_57A(self, val):\n self.swift_obj.AccountWithInstitution_A = val\n self.swift_obj.AccountWithInstitution_A.swiftTag = '57A'",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def _setaccount_with_institution_57C(self, val):\n self.swift_obj.AccountWithInstitution_C = val\n self.swift_obj.AccountWithInstitution_C.swiftTag = '57C'",
"def change_name(change_account):\n change_data(change_account, changed_data='name')",
"def change_plan(request):\n\n data = request.data\n\n start_date = datetime.datetime.now().strftime(\"%c\")\n end_date = end_date = (datetime.datetime.now() + datetime.timedelta(30)).strftime(\"%x\")\n \n # print(data[\"subscription_plan\"])\n \n try: \n user = User.objects.get(email=request.user) \n customer = Customer.objects.get(user=user)\n subscription_plan = SubscriptionPlan.objects.get(subscription_plan_name=data[\"subscription_plan\"])\n\n if customer.is_subscribe:\n stripe.Subscription.delete(\n customer.subscription_id,\n ) \n\n plan_id = \"price_1JsHMxSDkRo5FXlkOsq2QHSV\"\n\n if data[\"subscription_plan\"]== \"Globalnet Silver\":\n plan_id = \"price_1JsHOJSDkRo5FXlkQmfEQzhN\"\n \n if data[\"subscription_plan\"]== \"Globalnet Gold\":\n plan_id = \"price_1JsHPFSDkRo5FXlk9VSl41rV\"\n\n # Create new stripe subscription\n subscription = stripe.Subscription.create(\n customer = customer.stripe_id,\n items = [{'plan':plan_id}]\n ) \n \n # Update SubscriptionData \n subscription_user_data = SubscriptionData.objects.filter(subscriber=customer.primary_number) \n for data_subscriber in subscription_user_data:\n if(data_subscriber.subscription_start == customer.start_date):\n data_subscriber.subscription_end = start_date \n data_subscriber.save() \n break \n \n \n # Change subscription plan info\n customer.subscription_plan = subscription_plan\n customer.start_date = start_date\n customer.end_date = end_date\n customer.subscription_id = subscription.id\n customer.is_subscribe = True\n customer.save()\n \n # Create new subscription data \n SubscriptionData.objects.create(\n subscriber = customer.primary_number,\n subscription = subscription_plan.subscription_plan_name,\n subscription_start = start_date,\n subscription_end = end_date \n \n )\n \n serializer= CustomerSerializer(customer,many=False)\n \n return Response(serializer.data)\n \n except Exception as e: \n message = {\"Error\":str(e)}\n return Response(message)",
"def updateSkirtColor(self, skirtColor): \n self.avatarConfiguration[\"skirt\"] = str(skirtColor)\n self.paintSkirt()",
"def update_cloud_account_name(cls, body: AwsCloudAccountUpdateName) -> Dict:\n\t\tpass",
"def change_surname(change_account):\n change_data(change_account, changed_data='surname')",
"def test_update_payment_profile(self):\n self.cim.update_payment_profile(\n customer_profile_id=u\"122\",\n customer_payment_profile_id=u\"444\",\n card_number=u\"422222222222\",\n expiration_date=u\"2009-10\"\n )",
"def update_account(row, account):\n if row['LAST_UPDATED_FROM_PAYGOV']:\n updated_at = datetime_from(row['LAST_UPDATED_FROM_PAYGOV'])\n account.donations.filter(time__lte=updated_at).delete()\n if account.category == Account.PROJECT:\n set_balances(row, account)\n account.save()",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account"
]
| [
"0.6504229",
"0.62209433",
"0.595125",
"0.5903116",
"0.57759696",
"0.57391024",
"0.56339806",
"0.5585082",
"0.5583133",
"0.55682266",
"0.5565345",
"0.5531113",
"0.5486291",
"0.54483426",
"0.54368883",
"0.54215336",
"0.53766495",
"0.53355706",
"0.53355706",
"0.53174454",
"0.52900547",
"0.5283346",
"0.5279604",
"0.5267293",
"0.5221997",
"0.51841533",
"0.5178421",
"0.5165266",
"0.5165266",
"0.5165266"
]
| 0.64245176 | 1 |
The api properties for special APIs. | def api_properties(self) -> Optional['outputs.ApiPropertiesResponse']:
return pulumi.get(self, "api_properties") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _setup_api_properties(self):\n self.implicit_api_logical_id = GeneratedLogicalId.implicit_http_api()\n self.implicit_api_condition = \"ServerlessHttpApiCondition\"\n self.api_event_type = \"HttpApi\"\n self.api_type = SamResourceType.HttpApi.value\n self.api_id_property = \"ApiId\"\n self.editor = OpenApiEditor",
"def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']",
"def api(self) -> str:",
"def api_metadata_properties(self) -> Optional[pulumi.Input['GatewayApiMetadataPropertiesArgs']]:\n return pulumi.get(self, \"api_metadata_properties\")",
"def api_params(self):\n return self._api_params",
"def as_api_parameters(self):\n raise NotImplementedError(\n 'as_api_parameters not implemented on ' + self.__class__)",
"def Modifier_API(self):\n\t\tpass",
"def properties(self):\n raise NotImplementedError",
"def api_access(self):\n return self._api_access",
"def api(self):\n return self.__api",
"def properties(self):",
"def properties(self):",
"def properties(self):",
"def get_properties():",
"def properties(self):\n pass",
"def test_get_api_resources(self):\n pass",
"def api(self):\r\n return self._api",
"def __init__(self, api=None, properties=None):\n if not api is None:\n self.api = api",
"def api(self):\n return self._api",
"def api(self):\n return self._api",
"def api(self):\n return self._api",
"def api(self):\n return self._api",
"def getProperties():",
"def api_keys(self) -> dict:\n return self.AUTH.get_api_keys()",
"def feature_dynamic_windowsapi(self):\n self.features[\"api_stats\"] = {}\n apistats = self.report.get(\"behavior\", {}).get(\"apistats\", {})\n for d in apistats:\n for e in apistats[d]:\n if e in self.features[\"api_stats\"]:\n self.features[\"api_stats\"][e] += apistats[d][e]\n else:\n self.features[\"api_stats\"][e] = apistats[d][e]",
"def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)",
"def attributes(self):\n raise NotImplementedError",
"async def api():\n return {\n \"links\": {\n \"datasets\": \"/datasets\",\n \"natural_earth_feature\": \"/natural_earth_feature\",\n \"viewport\": \"/viewport\"\n }\n }",
"def as_api_parameters(self):\n return {\n 'return_token': self.token,\n 'return_url': self.url,\n 'client_http_user_agent': self.user_agent,\n 'client_http_accept': self.accept,\n 'remote_site': self.remote_site,\n }",
"def test_properties_get(self):\n pass"
]
| [
"0.7447788",
"0.70224255",
"0.65719783",
"0.65687895",
"0.64152324",
"0.64069265",
"0.6361345",
"0.63350254",
"0.6326089",
"0.6255719",
"0.6232385",
"0.6232385",
"0.6232385",
"0.6178087",
"0.61595166",
"0.61541235",
"0.61463284",
"0.6104891",
"0.5966381",
"0.5966381",
"0.5966381",
"0.5966381",
"0.5906774",
"0.5875874",
"0.5862375",
"0.58613276",
"0.5852105",
"0.5847129",
"0.5819212",
"0.57681674"
]
| 0.7195016 | 1 |
The flag to enable dynamic throttling. | def dynamic_throttling_enabled(self) -> Optional[bool]:
return pulumi.get(self, "dynamic_throttling_enabled") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():",
"def test_change_default_throttling_settings_http_with_overwrite_throttled():",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():",
"def should_be_throttled(self, resource):\r\n pass",
"def test_change_default_throttling_settings_http_with_overwrite_not_throttled():",
"def set_throttling(self, state: bool, value: int = 0):\r\n if state:\r\n self.msg_send_upr.data[0] = b\"\\x26\"[0]\r\n self.msg_send_upr.data[2] = value\r\n else:\r\n self.msg_send_upr.data[:3] = b\"\\x27\"[0]\r\n self.send_and_flush(self.msg_send_upr)",
"def should_be_throttled(self, identifier, **kwargs):\r\n return False",
"def _throttle_time(provider):\n if provider == 'nominatim':\n return 1\n else:\n return 0",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():",
"def enable_rate_limiting(self, enable, use_percent):\n self._config.set_default_rate_limiting_percentage(use_percent)\n if enable and self._rate_limiter_map is None:\n self._rate_limiter_map = RateLimiterMap()\n self._table_limit_update_map = dict()\n self._threadpool = pool.ThreadPool(1)\n elif not enable and self._rate_limiter_map is not None:\n self._rate_limiter_map.clear()\n self._rate_limiter_map = None\n self._table_limit_update_map.clear()\n self._table_limit_update_map = None\n if self._threadpool is not None:\n self._threadpool.close()\n self._threadpool = None",
"def setDebounce(self, enable): \n if enable == True:\n DPxEnableDinDebounce()\n else:\n DPxDisableDinDebounce()",
"def test_request_throttling_is_per_user(self):\n self.ensure_is_throttled(MockView, 200)",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():",
"def prevent_throttling(self):\n now = time.time()\n if now - self.last_request < 1:\n logger.verbose(\"Last request issued less than 1 second ago\")\n logger.verbose(\"Sleep {0} second to avoid throttling.\",\n SHORT_WAITING_INTERVAL)\n time.sleep(SHORT_WAITING_INTERVAL)\n self.last_request = now\n\n self.req_count += 1\n if self.req_count % 3 == 0:\n logger.verbose(\"Sleep {0} second to avoid throttling.\",\n SHORT_WAITING_INTERVAL)\n time.sleep(SHORT_WAITING_INTERVAL)\n self.req_count = 0",
"def test_disable_limiter(monkeypatch):\n monkeypatch.setattr(planet.http, 'RATE_LIMIT', 0)\n monkeypatch.setattr(planet.http, 'MAX_ACTIVE', 0)",
"def batching_enabled(self) -> bool:\n ...",
"def EnableFreeAPIKeyRateLimit(self):\n self._hashes_per_batch = 4\n self._wait_after_analysis = 60.0",
"def get_throttle_factor(self): # pragma: no cover\n raise NotImplementedError()",
"async def test_throttle(self):\n calls = []\n\n @Throttle(seconds=5)\n async def test_throttle(force=False):\n calls.append(1)\n\n now = int(time.time())\n now_plus_four = now + 4\n now_plus_six = now + 6\n\n await test_throttle()\n self.assertEqual(1, len(calls))\n\n # Call again, still shouldn't fire\n await test_throttle()\n self.assertEqual(1, len(calls))\n\n # Call with force\n await test_throttle(force=True)\n self.assertEqual(2, len(calls))\n\n # Call without throttle, shouldn't fire\n await test_throttle()\n self.assertEqual(2, len(calls))\n\n # Fake time as 4 seconds from now\n with mock.patch(\"time.time\", return_value=now_plus_four):\n await test_throttle()\n self.assertEqual(2, len(calls))\n\n # Fake time as 6 seconds from now\n with mock.patch(\"time.time\", return_value=now_plus_six):\n await test_throttle()\n self.assertEqual(3, len(calls))",
"def isAllowDelay(self):\n return self.__allowDelay",
"def _check_throttles_decorator(func):\n @wraps(func)\n def _decorated(*args, **kwargs):\n # Skip the throttle check entirely if we've disabled rate limiting.\n # Otherwise, perform the checks (as usual)\n if RateLimitConfiguration.current().enabled:\n return func(*args, **kwargs)\n else:\n msg = \"Rate limiting is disabled because `RateLimitConfiguration` is not enabled.\"\n LOGGER.info(msg)\n return\n\n return _decorated",
"def set_throttle_timer(self, view, value):\n for cls in view.throttle_classes:\n cls.timer = lambda self: value",
"def EnableFreeAPIKeyRateLimit(self):\n self._analyzer.hashes_per_batch = 4\n self._analyzer.wait_after_analysis = 60\n self._analysis_queue_timeout = self._analyzer.wait_after_analysis + 1",
"def EnableBackoffIteration(self):\n\t\treturn self._get_attribute('enableBackoffIteration')",
"def test(self):\n return test_throttle_method()",
"def isDebounceEnabled(self):\n if DPxIsDinDebounce() == 0:\n debouce = False\n else:\n debouce = True\n return debouce",
"def _send_sampled_event(self):\n if not self.enabled:\n return False\n send_sample = False\n self.count += 1\n if self.actual_rate < self.statsd_sample_rate:\n self.monitored += 1\n send_sample = True\n self.actual_rate = float(self.monitored) / float(self.count)\n if self.count >= maxint or self.monitored >= maxint:\n self.count = 0\n self.monitored = 0\n return send_sample",
"def EnableFastConvergence(self):\n\t\treturn self._get_attribute('enableFastConvergence')",
"def bursting_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"bursting_enabled\")",
"def set_bounced(self, status: bool):\n self._bounced = status"
]
| [
"0.6286432",
"0.6163197",
"0.6152936",
"0.6117926",
"0.605267",
"0.5982151",
"0.5967855",
"0.5847482",
"0.5758198",
"0.5755627",
"0.5700462",
"0.569529",
"0.5694182",
"0.5687345",
"0.5619024",
"0.55816823",
"0.5572103",
"0.55682355",
"0.5496888",
"0.5494714",
"0.54758257",
"0.5461495",
"0.54577565",
"0.5414391",
"0.5406858",
"0.5322718",
"0.53196704",
"0.5316969",
"0.53108823",
"0.52833974"
]
| 0.8219248 | 0 |
(Metrics Advisor Only) The Azure AD Client Id (Application Id). | def aad_client_id(self) -> Optional[str]:
return pulumi.get(self, "aad_client_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self):\n return self.__client_id",
"def client_id(self) -> str:",
"def client_id(self):\n\n return self.__client_id",
"def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")",
"def client_id(self) -> str:\n return self.get_env_var(self.client_id_var)",
"def client_id(self) -> str:\n return self.get_env_var(self.client_id_var)",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"client_id\")",
"def get_client_id():\n\n return str(get_account().Get(GOA_ACCOUNT_OAUTH2, 'ClientId',\n dbus_interface=PROPERTIES))",
"def ApplicationId(self) -> _n_0_t_0:",
"def _get_client_id(self, context):\n for key, value in context.invocation_metadata():\n if key == 'client_id':\n return value\n raise Exception('client id not found')",
"def get_clientid(self):\n\n url = f'https://{self.__api}/v1/objects/client'\n body = {\"filter\": {}}\n with requests.post(url, json=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status_code not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status_code, response.content)\n return response.json().get('body')[0].get('id')",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")"
]
| [
"0.72582203",
"0.72582203",
"0.72582203",
"0.7254328",
"0.7227845",
"0.71365494",
"0.71189344",
"0.6955662",
"0.6955662",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.686798",
"0.6854775",
"0.6781222",
"0.6754085",
"0.6748058",
"0.67113435",
"0.67046446",
"0.67046446",
"0.67046446",
"0.67046446",
"0.67046446"
]
| 0.8002165 | 0 |
(QnAMaker Only) The Azure Search endpoint id of QnAMaker. | def qna_azure_search_endpoint_id(self) -> Optional[str]:
return pulumi.get(self, "qna_azure_search_endpoint_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def qna_azure_search_endpoint_key(self) -> Optional[str]:\n return pulumi.get(self, \"qna_azure_search_endpoint_key\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def eventhub_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"eventhub_endpoint_id\")",
"def eventhub_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"eventhub_endpoint_id\")",
"def resolver_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resolver_endpoint_id\")",
"def eventhub_endpoint_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"eventhub_endpoint_id\")",
"def get_id(self):\n return self.get_api_endpoint()",
"def resolver_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"resolver_endpoint_id\")",
"def qna_runtime_endpoint(self) -> Optional[str]:\n return pulumi.get(self, \"qna_runtime_endpoint\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def peer_azure_app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"peer_azure_app_id\")",
"def endpoint_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_uri\")",
"def endpoint_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_uri\")",
"def EndpointSetId(self):\n return self._get_attribute('endpointSetId')",
"def peer_azure_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"peer_azure_app_id\")",
"def service_bus_topic_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_topic_endpoint_id\")",
"def service_bus_topic_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_topic_endpoint_id\")"
]
| [
"0.75765604",
"0.70053834",
"0.70053834",
"0.70053834",
"0.70053834",
"0.6863342",
"0.6863342",
"0.6831415",
"0.6831415",
"0.6831415",
"0.6378568",
"0.6378568",
"0.6342887",
"0.6290179",
"0.62154305",
"0.6213062",
"0.6106662",
"0.60928464",
"0.60928464",
"0.60928464",
"0.60928464",
"0.60928464",
"0.60928464",
"0.6018659",
"0.598696",
"0.598696",
"0.59631187",
"0.5917956",
"0.58759737",
"0.58759737"
]
| 0.82035816 | 0 |
(QnAMaker Only) The Azure Search endpoint key of QnAMaker. | def qna_azure_search_endpoint_key(self) -> Optional[str]:
return pulumi.get(self, "qna_azure_search_endpoint_key") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def qna_azure_search_endpoint_id(self) -> Optional[str]:\n return pulumi.get(self, \"qna_azure_search_endpoint_id\")",
"def _get_query_api_key(self, params: Dict) -> Optional[str]:\n return None",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_id\")",
"def external_key_uri(self) -> str:\n return pulumi.get(self, \"external_key_uri\")",
"def key_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_url\")",
"def key_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_url\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_id\")",
"def endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_id\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")"
]
| [
"0.760087",
"0.6005936",
"0.59957916",
"0.59957916",
"0.59957916",
"0.59957916",
"0.5990177",
"0.59611803",
"0.59611803",
"0.58993065",
"0.58993065",
"0.58993065",
"0.58993065",
"0.58035594",
"0.58035594",
"0.579497",
"0.579497",
"0.579497",
"0.579497",
"0.579497",
"0.579497",
"0.575103",
"0.575103",
"0.575103",
"0.575103",
"0.575103",
"0.575103",
"0.575103",
"0.575103",
"0.575103"
]
| 0.84970903 | 0 |
(QnAMaker Only) The runtime endpoint of QnAMaker. | def qna_runtime_endpoint(self) -> Optional[str]:
return pulumi.get(self, "qna_runtime_endpoint") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def endpoint(self) -> str:\n return pulumi.get(self, \"endpoint\")",
"def endpoint(self) -> str:\n return pulumi.get(self, \"endpoint\")",
"def main():\n return execute_api(Freta(), [Endpoint], __version__)",
"def endpoint(self):\n return self.Endpoint",
"async def __anext__(self):\n return await self._endpoint.__anext__()",
"def get_endpoint(self, *args):\n\t\traise NotImplementedError",
"def endpoint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint\")",
"def endpoint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint\")",
"def getApi(self):\n return _SALOMERuntime.RuntimeSALOME_getApi(self)",
"def process_endpoint(self) -> str:\n return pulumi.get(self, \"process_endpoint\")",
"def endpoint(self):\r\n return self._endpoint",
"def get_api_endpoint(self):\n return \"tcp://%s:%s\" % (self.ip, self.port)",
"def api_endpoint():\n return 'localhost'",
"def api_endpoint(self) -> str:\n return pulumi.get(self, \"api_endpoint\")",
"def api(self) -> str:",
"def endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint\")",
"def endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint\")",
"def get_endpoint(self):\r\n return self._endpoint",
"def __get_endpoint(self):\n return self._endpoint",
"def custom_service_endpoint(self) -> global___Snippet.ClientInitialization.ServiceEndpoint:",
"def service(self):\n pass",
"def run(approot, instance):\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()",
"def runtime(self) -> str:\n return self._runtime",
"def client(self):\n raise NotImplementedError()",
"def runtime(self) -> str:\n return self._node[\"app_data\"].get(\"runtime\")",
"def backend(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"backend\")",
"def LocalEndpoint(self) -> _n_5_t_1:",
"def test_api_calls_no_parameters(self):\n quantum_program = self._get_quantum_program()\n\n # Invoke with no hub, group or project parameters.\n quantum_program.set_api(QE_TOKEN, QE_URL)\n\n self.log.info(quantum_program.online_backends())\n self.log.info(quantum_program.get_backend_parameters(self.backend))\n self.log.info(quantum_program.get_backend_calibration(self.backend))",
"def discovery_endpoint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"discovery_endpoint\")",
"def _get_base_endpoint_name(self):"
]
| [
"0.5615672",
"0.5615672",
"0.5560079",
"0.5543251",
"0.5528619",
"0.55271727",
"0.54968864",
"0.54968864",
"0.5390125",
"0.5370197",
"0.53535056",
"0.53421795",
"0.5330651",
"0.5318378",
"0.5286095",
"0.52837664",
"0.52837664",
"0.5264753",
"0.5225494",
"0.5222705",
"0.5215595",
"0.52073795",
"0.52020925",
"0.5194703",
"0.517932",
"0.51625746",
"0.51595324",
"0.5140599",
"0.51403046",
"0.51229304"
]
| 0.7168018 | 0 |
(Bing Search Only) The flag to enable statistics of Bing Search. | def statistics_enabled(self) -> Optional[bool]:
return pulumi.get(self, "statistics_enabled") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uses_statistics(self):\n return True",
"def query_insights_enabled(self) -> bool:\n return pulumi.get(self, \"query_insights_enabled\")",
"def statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))",
"def event_ball_search_enable(self, **kwargs):\n del kwargs\n self.ball_search.enable()",
"def test_otoroshi_controllers_adminapi_analytics_controller_filterable_stats(self):\n pass",
"def advancedStats():",
"def db_stats(self):\n return { \"search_and_get\": self.db_search_and_get }",
"def stats(self):\n pass",
"def __init__(self,\n query,\n api_key,\n version=None):\n if not version:\n version = 1\n PyBingSearch.__init__(self,\n api_key=api_key,\n query=query,\n query_base=self.WEB_QUERY_BASE,\n version=version)",
"def statistics(self):\n raise NotImplemented()",
"def statflag(self) -> str | None:\n return self.status.get(\"STATFLAG\")",
"def query_aggregation(self) -> ConfigNodePropertyBoolean:\n return self._query_aggregation",
"def is_searchable(self):\n return self._get_search_query() != ''",
"def stats(self):",
"def set_search_properties(self, metric: Optional[str], mode: Optional[str],\r\n config: Dict) -> bool:\r\n return False",
"def stats(self):\n raise NotImplementedError(\"Must implement in frontend subclass.\")",
"def is_send_statistic_report(self):\n\t\treturn bool(call_sdk_function('PrlDispCfg_IsSendStatisticReport', self.handle))",
"def is_statistics_capable():\n context = package_context.get_context()\n\n analyzer = \"clangsa\"\n enabled_analyzers = [analyzer]\n cfg_handlers = analyzer_types.build_config_handlers({},\n context,\n enabled_analyzers)\n\n clangsa_cfg = cfg_handlers[analyzer]\n analyzer = analyzer_types.supported_analyzers[analyzer](clangsa_cfg,\n None)\n\n check_env = analyzer_env.get_check_env(context.path_env_extra,\n context.ld_lib_path_extra)\n\n checkers = analyzer.get_analyzer_checkers(clangsa_cfg, check_env)\n\n stat_checkers_pattern = re.compile(r'.+statisticscollector.+')\n\n for checker_name, _ in checkers:\n if stat_checkers_pattern.match(checker_name):\n return True\n\n return False",
"def pg_stat_monitor_enable(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"pg_stat_monitor_enable\")",
"def user_query_stats_helper(request, search_query, base_brand):\n\n # print(\"Got: request %r\" % request)\n print(\"Got search_query %r\" % search_query)\n print(\"Got base_brand %r\" % base_brand)\n\n mongo_utils.track_visit(request)\n\n # first prettify the query for mandrill, intercom, and slack\n try:\n only_setup_params = find_non_default_query(search_query)\n if only_setup_params is None or only_setup_params == [{}]:\n only_setup_params = {}\n query_formatted = format_query_for_displaying(only_setup_params)\n print \"only_setup_params = [%r] query_formatted = [%r]\" % (only_setup_params, query_formatted)\n except:\n a = json.dumps(search_query, sort_keys=True, indent=4, separators=(',', ': '))\n query_formatted = 'Problem in formatting %r' % a\n pass\n\n mongo_utils.track_query(\"brand-search-query\", query_formatted, {\"user_id\": request.visitor[\"auth_user\"].id})\n\n account_helpers.intercom_track_event(request, \"brand-search-query\", {\n 'query': query_formatted,\n })\n\n if base_brand:\n user = User.objects.get(id=request.user.id)\n if base_brand.flag_trial_on and not account_helpers.internal_user(user):\n slack_msg = \"\\n**************\\nBrand = \" + base_brand.domain_name + \" User: \" + request.user.email + \"\\n\" + query_formatted\n account_helpers.send_msg_to_slack('brands-trial-activity', slack_msg)\n\n base_brand.saved_queries.create(query=json.dumps(search_query), user=request.user)",
"def LatencyBinsEnabled(self):\n\t\treturn self._get_attribute('latencyBinsEnabled')",
"def displayStatistics(self):\n return \"\"",
"def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data",
"def stat(**kwargs):\n print(\"output stats\")",
"def _send_sampled_event(self):\n if not self.enabled:\n return False\n send_sample = False\n self.count += 1\n if self.actual_rate < self.statsd_sample_rate:\n self.monitored += 1\n send_sample = True\n self.actual_rate = float(self.monitored) / float(self.count)\n if self.count >= maxint or self.monitored >= maxint:\n self.count = 0\n self.monitored = 0\n return send_sample",
"def search(self, query, maxhits=100):",
"def stats_search(self, host):\n\n s = self.get_stats(host, 'search')\n\n data = {\n 'query_total': s['query_total'],\n 'fetch_time_in_millis': s['query_time_in_millis'],\n 'fetch_total': s['fetch_total'],\n 'query_time_in_millis': s['fetch_time_in_millis'],\n 'open_contexts': s['open_contexts'],\n 'fetch_current': s['fetch_current'],\n 'query_current': s['query_current']\n }\n\n return data",
"def event_stats(self):\n pass",
"def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)",
"def set_show_all_stats(show_all_stats:bool=False):\n _default_recorder.show_all_stats = show_all_stats"
]
| [
"0.6719449",
"0.58057433",
"0.55334",
"0.54760736",
"0.5386774",
"0.53164434",
"0.51713836",
"0.5164361",
"0.5161711",
"0.50041795",
"0.49931997",
"0.49914446",
"0.49898",
"0.49377626",
"0.49150836",
"0.4907638",
"0.48831502",
"0.4852127",
"0.48485795",
"0.4833888",
"0.4824938",
"0.48237926",
"0.48169997",
"0.48120025",
"0.4807508",
"0.48034778",
"0.47957897",
"0.4780031",
"0.47773296",
"0.47631228"
]
| 0.6361315 | 1 |