query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
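Read in order, the columns above describe one retrieval-training example per row: a natural-language query, one matching code document, and thirty scored non-matching documents, with the pairing objective declared in the metadata field. A minimal sketch of what a single loaded record could look like (field names come from the schema above, the first row's values are reused for illustration, and the interpretation of document_score and document_rank is an assumption, not documented here):

    # Illustrative record shape only; truncated values are marked with "..."
    record = {
        "query": "returns the new values when stage_id has changed",
        "document": "def _onchange_stage_id_values(self, stage_id): ...",  # the positive code snippet
        "metadata": {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
        "negatives": ["def _update_stages(self, stages, deployment_id): ...", "..."],  # 30 candidate snippets
        "negative_scores": ["0.6233984", "0.57401246", "..."],  # one score string per negative
        "document_score": "0.6233945",  # assumed: score of the positive document
        "document_rank": "1",           # assumed: rank of the positive among the scored candidates
    }

The rows that follow repeat the fields in this order: query, document, metadata, negatives, negative_scores, document_score, document_rank.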
returns the new values when stage_id has changed
def _onchange_stage_id_values(self, stage_id):
    if not stage_id:
        return {}
    print('1111')
    call_attempt = len(self.env['call.attempt'].browse(self.call_attempt_ids))
    call_pitch = len(self.env['call.pitch'].browse(self.call_pitch_ids))
    contact_meeting = len(self.env['contact.meeting'].browse(self.contact_meeting_ids))
    # file_attached = len(self.env['ir.attachment'].search([('res_model','=','res.partner'),('res_id','=',self.id)]))
    msg = ''
    ## file attached
    file_attached = len(self.env['ir.attachment'].search([('res_model', '=', 'res.partner'), ('res_id', '=', self.id)]))
    if self.stage_id.id in (8, 16) and file_attached == 0: msg = msg + ' - Upload at least one file \n'
    ##
    if self.stage_id.id == 2 and call_attempt == 0: msg = msg + ' - Call Attempt \n'
    if self.stage_id.id == 3 and call_pitch == 0: msg = msg + ' - Call Pitch \n'
    if self.stage_id.id == 9 and self.date_call_back_one == False: msg = msg + ' - Date (callback) '
    if self.stage_id.id == 10 and self.date_meeting_set == False: msg = msg + ' - Date (meeting set) \n'
    if self.stage_id.id == 6 and self.date_preagreement == False: msg = msg + ' - Date (pre_agreement) \n'
    ## individual and company contact
    if self.stage_id.id in (8, 16) and self.mobile == False: msg = msg + ' - Mobile \n'
    if self.stage_id.id in (8, 16) and self.email == False: msg = msg + ' - Email \n'
    if self.stage_id.id in (8, 16) and self.street == False: msg = msg + ' - Street in Adress \n'
    if self.stage_id.id in (8, 16) and self.lang == False: msg = msg + ' - Language \n'
    if self.stage_id.id in (8, 16) and self.business_developer_id == False: msg = msg + ' - Business Developer \n'
    if self.stage_id.id in (8, 16) and self.vat == False: msg = msg + ' - TIN \n'
    ## individual contact
    if self.stage_id.id in (8, 16) and self.parent_id and self.parent_id.street == False: msg = msg + ' - Invoicing Address (Company Adress) \n'
    if self.stage_id.id in (8, 16) and self.inami == False: msg = msg + ' - INAMI \n'
    if self.stage_id.id in (8, 16) and self.subscription_type == False: msg = msg + ' - Subscription Type \n'
    if self.stage_id.id in (8, 16) and not self.title and self.is_company != True: msg = msg + ' - Title \n'
    if self.stage_id.id in (8, 16) and self.specialization == False: msg = msg + ' - Specialization \n'
    ### Prospection process
    if self.stage_id.id in (8, 16) and self.date_signed == False: msg = msg + ' - Date(Signed) \n'
    if self.stage_id.id in (8, 16) and self.bd_signed == False: msg = msg + ' - Business Developer (Signed) \n'
    if self.stage_id.id in (8, 16) and self.comment_signed == False: msg = msg + ' - Comment (Signed) \n'
    ### Subscription details
    if self.stage_id.id in (8, 16) and self.subscription_month == False: msg = msg + ' - Monthly subscription \n'
    if self.stage_id.id in (8, 16) and self.subscription_commitment == False: msg = msg + ' - Commitment \n'
    if self.stage_id.id in (8, 16) and self.subscription_upfront_payment == False: msg = msg + ' - Upfront Payment \n'
    if self.stage_id.id in (8, 16) and self.subscription_upfront_turnover == False: msg = msg + ' - Upfront turnover \n'
    if self.stage_id.id in (8, 16) and self.subsciption_part_condition == False: msg = msg + ' - Particular Conditions \n'
    ## stage activated and only individuals
    if self.stage_id.id == 16 and self.doctor_admin == False: msg = msg + ' - Doctor AdminID \n'
    ### stage account managment
    if self.stage_id.id == 16 and self.first_email == False: msg = msg + ' - 1st email (activation) \n'
    if self.stage_id.id == 16 and self.service_completed == False: msg = msg + ' - Services completed \n'
    if self.stage_id.id == 16 and self.price_completed == False: msg = msg + ' - Prices completed \n'
    if self.stage_id.id == 16 and self.cv_completed == False: msg = msg + ' - CV/experiences completed \n'
    if self.stage_id.id == 16 and self.duration_completed == False: msg = msg + ' - Duration completed \n'
    if self.stage_id.id == 16 and self.personal_message_completed == False: msg = msg + ' - Personal message completed \n'
    if self.stage_id.id == 16 and self.profile_picture == False: msg = msg + ' - Profile picture \n'
    if self.stage_id.id == 16 and self.photo_practice == False: msg = msg + ' - Photo Practice \n'
    if self.stage_id.id == 16 and self.marketing_kit == False: msg = msg + ' - Marketing kit \n'
    if self.stage_id.id == 16 and self.synchronisation_completed == False: msg = msg + ' - Synchronization \n'
    if self.stage_id.id == 16 and self.backlink == False: msg = msg + ' - Backlink \n'
    if self.stage_id.id == 16 and self.google_profile == False: msg = msg + ' - Google profile \n'
    if self.stage_id.id == 16 and self.voicemail == False: msg = msg + ' - Voicemail \n'
    if self.stage_id.id == 16 and self.mail_signature == False: msg = msg + ' - Mail signature \n'
    if self.stage_id.id == 16 and self.email_to_patient == False: msg = msg + ' - Email to patient \n'
    if self.stage_id.id == 16 and self.translation == False: msg = msg + ' - Translation \n'
    if self.stage_id.id == 16 and self.business_card == False: msg = msg + ' - Manuel Sent \n'
    if self.stage_id.id == 16 and self.manuel_sent == False: msg = msg + ' - Business cards \n'
    if self.stage_id.id == 16 and self.widget == False: msg = msg + ' - Widget \n'
    if self.stage_id.id == 16 and self.voice_mail == False: msg = msg + ' - Voicemail + email signature \n'
    if self.stage_id.id == 16 and self.website_ok == False: msg = msg + ' - Website \n'
    if self.stage_id.id == 16 and self.customer_service_number == False: msg = msg + ' - Customer service number on google profile \n'
    if self.stage_id.id == 16 and self.website_backlink == False: msg = msg + ' - Backlink on website \n'
    ## Lost paying, tab lost
    if self.stage_id.id == 17 and self.date_lost == False: msg = msg + ' - Lost Date \n'
    if self.stage_id.id == 17 and self.reason_lost == False: msg = msg + ' - Lost Reason \n'
    ##
    if msg:
        raise ValidationError('To move to this step you first need to fill those fields : \n' + msg)
    return {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_stages(self, stages: Set[str], deployment_id: Optional[str]) -> Set[str]:\n prev_dep_ids = set()\n for stage in stages:\n # Collects previous deployment IDs to clean up\n response_get = cast(Dict, self._api_client.get_stage(restApiId=self._api_physical_id, stageName=stage))\n prev_dep_id = response_get.get(\"deploymentId\")\n if prev_dep_id:\n prev_dep_ids.add(cast(str, prev_dep_id))\n\n # Updates the stage with newest deployment\n LOG.debug(\"%sTrying to update the stage %s through client\", self.log_prefix, stage)\n response_upd = cast(\n Dict,\n self._api_client.update_stage(\n restApiId=self._api_physical_id,\n stageName=stage,\n patchOperations=[{\"op\": \"replace\", \"path\": \"/deploymentId\", \"value\": deployment_id}],\n ),\n )\n LOG.debug(\"%sUpdate Stage Result: %s\", self.log_prefix, response_upd)\n\n # Flushes the cache so that the changes are calleable\n self._api_client.flush_stage_cache(restApiId=self._api_physical_id, stageName=stage)\n self._api_client.flush_stage_authorizers_cache(restApiId=self._api_physical_id, stageName=stage)\n return prev_dep_ids", "def _collect_stages(self) -> Set[str]:\n # Get the stage name associated with the previous deployment and update stage\n # Stage needs to be flushed so that new changes will be visible immediately\n api_resource = get_resource_by_id(self._stacks, ResourceIdentifier(self._api_identifier))\n stage_resources = get_resource_ids_by_type(self._stacks, AWS_APIGATEWAY_STAGE)\n deployment_resources = get_resource_ids_by_type(self._stacks, AWS_APIGATEWAY_DEPLOYMENT)\n\n stages = set()\n # If it is a SAM resource, get the StageName property\n if api_resource:\n if api_resource.get(\"Type\") == AWS_SERVERLESS_API:\n # The customer defined stage name\n stage_name = api_resource.get(\"Properties\", {}).get(\"StageName\")\n if stage_name:\n stages.add(cast(str, stage_name))\n\n # The stage called \"Stage\"\n if stage_name != \"Stage\":\n response_sta = cast(Dict, self._api_client.get_stages(restApiId=self._api_physical_id))\n for item in response_sta.get(\"item\"): # type: ignore\n if item.get(\"stageName\") == \"Stage\":\n stages.add(\"Stage\")\n\n # For both SAM and ApiGateway resource, check if any refs from stage resources\n for stage_resource in stage_resources:\n # RestApiId is a required field in stage\n stage_dict = get_resource_by_id(self._stacks, stage_resource)\n if not stage_dict:\n continue\n rest_api_id = stage_dict.get(\"Properties\", {}).get(\"RestApiId\")\n dep_id = stage_dict.get(\"Properties\", {}).get(\"DeploymentId\")\n # If the stage doesn't have a deployment associated then no need to update\n if dep_id is None:\n continue\n # If the stage's deployment ID is not static and the rest API ID matchs, then update\n for deployment_resource in deployment_resources:\n if deployment_resource.resource_iac_id == dep_id and rest_api_id == self._api_identifier:\n stages.add(cast(str, stage_dict.get(\"Properties\", {}).get(\"StageName\")))\n break\n\n return stages", "def _patch_update_stages(\n self,\n stage_changes_list: list[dict[str, Any]],\n changed_fields: CHANGED_FIELDS_LIST_TYPE\n ) -> bool:\n stages: list[Stage] = []\n for change_info in stage_changes_list:\n stage_was_updated = False\n # Check if valid ID is provided and fetch stage if it exists.\n if 'id' not in change_info:\n self.abort(400, msg='Missing stage ID in stage updates')\n id = change_info['id']\n stage = Stage.get_by_id(id)\n if not stage:\n self.abort(400, msg=f'Stage not found for ID {id}')\n\n # Update stage fields.\n for field, 
field_type in api_specs.STAGE_FIELD_DATA_TYPES:\n if field not in change_info:\n continue\n form_field_name = change_info[field]['form_field_name']\n old_value = getattr(stage, field)\n new_value = change_info[field]['value']\n self._update_field_value(stage, field, field_type, new_value)\n changed_fields.append((form_field_name, old_value, new_value))\n stage_was_updated = True\n\n # Update milestone fields.\n milestones = stage.milestones\n for field, field_type in api_specs.MILESTONESET_FIELD_DATA_TYPES:\n if field not in change_info:\n continue\n if milestones is None:\n milestones = MilestoneSet()\n form_field_name = change_info[field]['form_field_name']\n old_value = getattr(milestones, field)\n new_value = change_info[field]['value']\n self._update_field_value(milestones, field, field_type, new_value)\n changed_fields.append((form_field_name, old_value, new_value))\n stage_was_updated = True\n stage.milestones = milestones\n\n if stage_was_updated:\n stages.append(stage)\n\n # Save all of the updates made.\n # Return a boolean representing if any changes were made to any stages.\n if stages:\n ndb.put_multi(stages)\n return True\n return False", "def stages(self):\n return self._stages", "def stages(self):\n return self._stages", "def getChanges():", "def get_stage_xyz(self):\n raise NotImplementedError", "def _read_group_stage_ids(self, stages, domain, order):\n stage_ids = self.env['salon.stage'].search([])\n return stage_ids", "def get_fill_stages(self) -> Tuple[str]:\n return self._fill_stages", "def stage_states(self) -> pulumi.Output[Sequence['outputs.ExecutionStageStateResponse']]:\n return pulumi.get(self, \"stage_states\")", "def store_old_table(self):\n\n for group in self.param_groups:\n for p in group['params']:\n gk = p.grad.data\n\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg']\n\n param_state['gktbl_old'] = gktbl.clone()\n param_state['gavg_old'] = gavg.clone()", "def list_value_changes(self, field_name):\n\n t = self.data['timestamp']\n x = self.data[field_name]\n indices = t != 0 # filter out 0 values\n t = t[indices]\n x = x[indices]\n if len(t) == 0: return []\n ret = [(t[0], x[0])]\n indices = np.where(x[:-1] != x[1:])[0] + 1\n ret.extend(zip(t[indices], x[indices]))\n return ret", "def changed_parameters(self):\n return self._changed_parameters", "def _simplify_stage_position_beta(self, stage_pos: dict):\n new_dict = {\n 'Label': stage_pos['label'],\n 'GridRow': stage_pos['gridRow'],\n 'GridCol': stage_pos['gridCol'],\n }\n\n for sub in stage_pos['subpositions']:\n values = []\n for field in ['x', 'y', 'z']:\n if sub[field] != 0:\n values.append(sub[field])\n if len(values) == 1:\n new_dict[sub['stageName']] = values[0]\n else:\n new_dict[sub['stageName']] = values\n\n return new_dict", "def stage_states(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExecutionStageStateArgs']]]]:\n return pulumi.get(self, \"stage_states\")", "def reset_stages(self) -> List[str]:\n return self._reset_stages", "def _simplify_stage_position(self, stage_pos: dict):\n out = copy(stage_pos)\n out.pop('DevicePositions')\n for dev_pos in stage_pos['DevicePositions']:\n out.update({dev_pos['Device']: dev_pos['Position_um']})\n return out", "def staged(self):\n return", "def publish_stage_changes(self, gateway_stack_name, live=False):\n rest_api_id = self.get_stack_restapi(gateway_stack_name)\n domain_name = self.get_stack_domain_name(gateway_stack_name)\n stack_id = self.get_stack_account(gateway_stack_name)\n if not domain_name:\n print 
\"[FAIL] No custom domain found.\"\n return False\n current_live_stage = self.get_live_stack(domain_name)\n\n if current_live_stage == 'green':\n to_stage = 'blue'\n elif current_live_stage == 'blue':\n to_stage = 'green'\n else:\n print \"[FAIL] Stage name not supported must be one of %s\" % str(self.aliases)\n sys.exit(1)\n\n if not live:\n print \"[INFO] Copying dev stage changes to pvt stage %s\" % to_stage\n self.update_stage(stack_id, rest_api_id, to_stage)\n self.create_deployment(rest_api_id, to_stage, \"Deployed by PyStacks\")\n else:\n print \"[INFO] Copying dev stage changes to live stage %s\" % current_live_stage\n self.update_stage(stack_id, rest_api_id, current_live_stage)\n self.create_deployment(rest_api_id, current_live_stage, \"Deployed by PyStacks\")", "def create_modified(self, mod_dicts):\n n_stages = len(self.stages)\n\n stages_mods = [mod_dict for mod_dict in mod_dicts if mod_dict[\"stage\"] < n_stages]\n tail_mods = [mod_dict for mod_dict in mod_dicts if mod_dict[\"stage\"] == n_stages]\n\n assert len([mod_dict for mod_dict in mod_dicts if mod_dict[\"stage\"] > n_stages]) == 0\n\n stages_dicts = [[] for _ in range(n_stages)]\n\n for mod in stages_mods:\n stages_dicts[mod[\"stage\"]].append(mod)\n\n new_stages = tuple(stage.create_modified(mods)\n for stage, mods in zip(self.stages, stages_dicts))\n\n new_tail = np.copy(self.tail)\n\n for mod in tail_mods:\n assert mod[\"parameter\"] == \"thres\"\n new_tail[mod[\"index\"]] = mod[\"value\"]\n\n return type(self)(new_stages, new_tail)", "def stage(self):\n pass", "def stage(self):\n return self._stage", "def stage(self):\n return self._stage", "def stage(self):\n return self._stage", "def sweep_stages(self) -> List[str]:\n return self._sweep_stages", "def increment_stage_in_forms(forms):\n for index, form in enumerate(forms.all(), 1):\n form.stage = index\n form.save(update_fields=['stage'])", "def get_stages(plan, problem_dic, problem_file, predicates_list):\r\n\r\n # Initial stage\r\n stages = problem_dic[0]['init'].copy()\r\n\r\n with open(problem_file) as file:\r\n text = file.read()\r\n objects = re.findall(r'\\b\\S+\\b', text[text.index(\"objects\")\r\n + len(\"objects\"):text.index(\"init\")])\r\n\r\n # Getting the list of actions from results returned from planning.domain api\r\n try:\r\n actionlist = plan['result']['plan']\r\n except KeyError:\r\n sys.exit(\"No plan have been returned\")\r\n cleanactionlist = remove_unused_char(actionlist)\r\n\r\n content = {\"stages\": [], \"objects\": objects}\r\n content['stages'].append({\"items\": stages.copy()})\r\n # 1. Go through the steps\r\n for counter in range(0, len(actionlist)):\r\n checklist = []\r\n init_object_list = server.PddLparser.visualiserFile.\\\r\n parser.problem_parser.\\\r\n get_object_list(predicates_list, cleanactionlist[counter])\r\n checklist = (init_object_list)\r\n\r\n # 2. 
Find the difference between 2 steps\r\n addactionlistarr = []\r\n removeactionlistarr = []\r\n for var in checklist:\r\n if var in stages:\r\n removeactionlistarr.append(var)\r\n else:\r\n addactionlistarr.append(var)\r\n\r\n # Append the list to get the final result\r\n for addvar in addactionlistarr:\r\n stages.append(addvar)\r\n for rmvar in removeactionlistarr:\r\n stages.remove(rmvar)\r\n\r\n # 3.\r\n # Append everything to get the final output - content\r\n result = {\"items\": stages.copy(),\r\n \"add\": addactionlistarr,\r\n \"remove\": removeactionlistarr}\r\n content['stages'].append(result)\r\n return content", "def get_proposed_values(self):\n return self.new_position, self.new_momentum, self.new_grad_logp", "def stage(self, stage_id):\r\n return pipelines.Stage(self, stage_id)", "def get_changed_columns(self):\r\n return [k for k,v in self._values.items() if v.changed]" ]
[ "0.6233984", "0.57401246", "0.5710629", "0.5523506", "0.5523506", "0.5495152", "0.54223007", "0.5403869", "0.53606635", "0.5297145", "0.5296984", "0.52794987", "0.52597815", "0.51409966", "0.51221114", "0.51093155", "0.5060696", "0.50557554", "0.50440127", "0.5032552", "0.50050354", "0.49854708", "0.49854708", "0.49854708", "0.49686602", "0.49645865", "0.4961817", "0.49350944", "0.49175158", "0.48977688" ]
0.6233945
1
get_battle and get_root work, right?
def test_get_battle(self):
    battle = self.battle
    s1 = battle.create_skirmish(self.alice, 1)
    s2 = battle.create_skirmish(self.bob, 1)
    s3 = s2.react(self.alice, 1)
    self.assertEqual(battle, s1.get_battle())
    self.assertEqual(battle, s3.get_battle())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def battle(ctx):\n return await battle(ctx)", "def test_get_game(self):\n pass", "def test_get_player_battles(self):\n pass", "def test_root_get(self):\n pass", "def test_root_get(self):\n pass", "def battle_resting(self):\n pass", "def run_new_battle(self):\n self.init_battle()\n return self.run_battle()", "def test_gridironfootballplayers_get(self):\n pass", "def warriorBattle1_2():\n print(\"You charge the ogre with full force.\")\n print(\"You smash into the ogre,\")\n print(\"with the warrior strength the ogres feet leaves the ground\")\n print(\"You push the ogre straight into the cave wall,\")\n print(\"as the ogre smashes in to it, it collapses on the ground\")\n print(f\"The ogres health is now {ogre_data - 100}\")\n print(\"The ogre is dead\")\n print(f\"Your health is now {hero_data[0] - 80}\")\n get_warrior_loot()\n warriorPath1_1_1()", "def get_root(self) -> object:", "def itarate_the_recursion(battle_queue) -> List:\n\n score = None\n name = 1\n children = []\n m = [name, score, battle_queue, children]\n thing = Stack()\n thing.add(m)\n first_player = battle_queue.peek().get_name()\n\n\n while not thing.is_empty():\n\n x = thing.remove()\n\n\n if x[2].is_over():\n if x[2].get_winner():\n winner_hp = x[2].get_winner().get_hp()\n x[1] = winner_hp if x[2].get_winner().get_name() \\\n == first_player else winner_hp * -1\n else:\n x[1] = 0\n\n elif x[1] is None and x[3] != []:\n j = []\n for i in x[3]:\n j += [i[1]]\n x[1] = max(j)\n\n\n elif not x[2].is_over():\n thing.add(x)\n\n moves = x[2].peek().get_available_actions()\n\n for i in moves:\n name += 1\n\n clone = x[2].copy()\n next_char = clone.peek()\n\n mover(i, next_char)\n\n if not clone.is_empty():\n clone.remove()\n\n new_tree = [name, None, clone, []]\n\n x[3].append(new_tree)\n thing.add(new_tree)\n return m", "def lobby():\n return UncheckedPlace(\"Lobby\")", "def battleStage(self):\n # Continuously saves logging information to a text file:\n self.logs.close()\n self.logs = open(str(self.filepath)+\"/_logs/\"+ str(self.PORT) + \".txt\", \"a+\")\n\n print(\"(\" + str(self.HOST) + \", \" + str(self.PORT) +\"):: Entering battle stage\", file=self.logs)\n\n # Prepare the board for play; removes red and blue squares from the board\n self.game.normalizeBoard()\n \n # Sets players' moves per turn\n self.game.setPlayerMoves()\n\n # Keeps track of who can edit the board.\n activePlayer = self.players[0]\n\n gameState = {\n \"active\": activePlayer.getName(),\n \"game\": self.game\n }\n while True:\n inboundData = self.socket.recvfrom(1024) # Gets bundle of data from clients\n data = inboundData[0] # Separates data from address\n\n address = inboundData[1] # Separates address from data\n data = pickle.loads(data) # Unpickles data back into a python dict\n\n ########\n self.bitsIn += sys.getsizeof(data)\n\n # Keeps track of how often the server recieves information from each client.\n updatedTime = time.time() \n self.clientUpdateTimes[str(address)] = updatedTime\n\n # Keeps track of how often the server recieves information from each client.\n updatedTime = time.time() \n self.clientUpdateTimes[str(address)] = updatedTime\n\n if data['stage'] == 'battle':\n # Interacts with the game object, then sends the updated game back\n if self.game.isFinished() == True:\n print(\"\\n~~~ GAME OVER ~~~\", file=self.logs)\n print(\" Total bits in: %3.5f mb\"% (self.bitsIn/8000000), file=self.logs)\n print(\"Total bits out: %3.5f mb\"% (self.bitsOut/8000000), file=self.logs)\n break\n\n if data['command'] != None: # Only sends 
relevante data\n self.game.battleActions(data['command'], data['square'], data['moveSquare'])\n gameState['game'] = self.game\n\n # Packages up data and sends it back to the client\n outboundData = pickle.dumps(gameState)\n\n ######\n self.bitsOut += sys.getsizeof(outboundData)\n\n self.socket.sendto(outboundData, address)\n\n # Check client connections here\n self.checkClientConnections(time.time())", "def battle(first, second):\n\n print(get_catchphrase(first))\n print(get_catchphrase(second))\n\n if get_damage(second) > get_damage(first):\n return second\n else:\n return first", "def warriorBattle1_1():\n print(\"Before you can react the ogre runs at you,\")\n print(\"pushing you in to the wall.\")\n print(f\"Your health is now {hero_data[0] - 60}\")\n print(\"As you are trapped between the wall and the ogre,\")\n print(\"you manage to punch the ogre in the face.\")\n print(f\"The ogres health is now {ogre_data - 100}\")\n print(\"The ogre is dead\")\n get_warrior_loot()\n warriorPath1_1_1()", "def get_winner(game):\n return game['winner']", "def battle(bid):\n try:\n return template('battle.html', battle=SERVER.get_battle(bid))\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show battle with bid '%s'\", bid)\n return JsonResponse.error(101)", "def climb_tree():\n global UP_TREE\n westdesc = \"\"\n eastdesc = \"\"\n northdesc = \"\"\n southdesc = \"\"\n UP_TREE = True\n westinvalid = False\n eastinvalid = False\n northinvalid = False\n southinvalid = False\n\n\n printmessage(\"You climb the large tree to get a look at your surroundings.\", 5, MAGENTA, 2)\n\n if ZERO_BASE_PLYR_POS in range(0, 10):\n northinvalid = True\n if ZERO_BASE_PLYR_POS in range(90, 100):\n southinvalid = True\n if ZERO_BASE_PLYR_POS in range(0, 91, 10):\n eastinvalid = True\n if ZERO_BASE_PLYR_POS in range(9, 100, 10):\n westinvalid = True\n \n if not westinvalid: \n westpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 1]\n if HAS_COMPASS: \n DISCOVERED[ZERO_BASE_PLYR_POS + 1] = \"Y\"\n if westpos == 10: # Water\n westdesc = TREE_VIEWS[2]\n else:\n westdesc = TREE_VIEWS[1]\n\n westpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 1]\n if westpos == 1:\n westdesc = TREE_VIEWS[3]\n elif westpos == 2:\n westdesc = TREE_VIEWS[4]\n else:\n westdesc = TREE_VIEWS[5]\n\n if not eastinvalid:\n eastpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 1]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 1] = \"Y\"\n if eastpos == 10: # Water\n eastdesc = TREE_VIEWS[2]\n else:\n eastdesc = TREE_VIEWS[1]\n\n eastpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 1]\n if eastpos == 1:\n eastdesc = TREE_VIEWS[3]\n elif eastpos == 2:\n eastdesc = TREE_VIEWS[4]\n else:\n eastdesc = TREE_VIEWS[6]\n\n\n if not northinvalid:\n northpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 10] = \"Y\"\n if northpos == 10: # Water\n northdesc = TREE_VIEWS[2]\n else:\n northdesc = TREE_VIEWS[1]\n\n northpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 10]\n if northpos == 1: # bear\n northdesc = TREE_VIEWS[3]\n elif northpos == 2: # grizzly\n northdesc = TREE_VIEWS[4]\n else:\n northdesc = TREE_VIEWS[7]\n\n\n if not southinvalid:\n southpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS + 10] = \"Y\"\n if southpos == 10: # Water\n southdesc = TREE_VIEWS[2]\n else:\n southdesc = TREE_VIEWS[1]\n\n southpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 10]\n if southpos == 1: # bear\n southdesc = TREE_VIEWS[3]\n elif southpos == 2: # 
grizzly\n southdesc = TREE_VIEWS[4]\n else:\n southdesc = TREE_VIEWS[8]\n\n clear_messages(0)\n printmessage(\"West: \" + westdesc, 2, GREEN, 0)\n printmessage(\"East: \" + eastdesc, 3, YELLOW, 0)\n printmessage(\"North: \" + northdesc, 4, CYAN, 0)\n printmessage(\"South: \" + southdesc, 5, MAGENTA, 0)\n #show_movement(True, 10)\n update_player_on_map()\n pause_for_keypress()\n clear_messages(0)", "def main(player):\n saved_score = 0\n rat_array = [\"reset\"]\n current_fight = \"\"\n while player.hp >= 1:\n\n system.clear_screen()\n if player.location == route_list[0]:\n pass\n else:\n rat_array = []\n rat_chance = randint(1, 100)\n if rat_chance >= 50:\n rat_array = system.npc_swarm_spawn()\n else:\n # must reset here, or a sub 50 roll crashes with no rat_array found\n rat_array = [\"reset\"]\n pass\n if player.location == current_fight:\n rat_array = [\"reset\"]\n else:\n pass\n\n # encounter spawn gotta go somewhere how bout here\n system.encounter_chance(player)\n\n status_array = system.status_message(route_list, player, rat_array)\n print(f\"{status_array[0]}\\n{status_array[1]}\")\n\n movement_options = system.movement_options(route_list, player)\n print(\"\\nAdjacent systems to your current location are:\")\n for movement_option in movement_options:\n print(movement_option)\n if len(movement_options) == 1:\n print(\n f\"\\nWhat is your decision? \\n\\nAvailable commands are {movement_options[0]}, \"\n + \"or type 'rat' to shoot rats.\"\n )\n else:\n print(\n f\"\\nWhat is your decision? \\n\\nAvailable commands are {movement_options[0]}, \"\n + f\"{movement_options[1]} or type 'rat' to shoot rats.\"\n )\n try:\n player_action = str(input())\n except ValueError:\n print(\"You spin your ship.\")\n\n action = system.parse_input(player_action, movement_options, player)\n # print(rat_array)\n if action.lower() == \"rat\":\n if rat_array[0] != \"reset\":\n # print('fightin')\n system.rat_fight(rat_array, player)\n # system.clear_screen()\n try:\n for rat_item in rat_array:\n rat_array[rat_item].remove()\n rat_array = [\"reset\"]\n current_fight = player.location\n except:\n rat_array = [\"reset\"]\n current_fight = player.location\n\n if player.location == destination_system:\n print(\n f\"\\n\\nCongratulations, you have arrived at {player.location}. \"\n + \"\\nYou may now set a new destination, or dock up and use your points you've gained to reship. \"\n + \"\\nOr you may choose to either hold onto your points, in which case they might be lost on death \"\n + \"or save them to buy bigger and better ships\"\n + \"\\no7 capsuleer the system is clear. \"\n + f\"\\n\\nYour final score from this trip was {player.score}\")\n saved_score += player.score\n\n if(player.hp < 1):\n print(\n f\"\\n\\nYour ship explodes in to tiny pieces at the stargate in {player.location}. \"\n + \"\\nYour capsule containing your body shatters from the force of the explosion. \"\n + \"\\nYou are dead. You wake up in your hangar where your death clone is set to and \"\n + \"prepare to voyage out once again. \"\n + \"\\no7 capsuleer the cyno is now lit. 
\"\n + f\"\\n\\nYour final score was {player.score}\"\n )", "def minimaxLocalSearch(gamestate, depth, timeTotal, alpha, beta, maxEntity):\n bonus = 0\n isTerminalState = gamestate.board.checkTerminalState(gamestate.currentPlayer.noPlayer)\n # Basis Rekursif\n if ((depth == 0) or (time.time() > timeTotal) or (isTerminalState)):\n if (isTerminalState) and (gamestate.currentPlayer.noPlayer == maxEntity):\n bonus = 10\n elif (isTerminalState) and (gamestate.currentPlayer.noPlayer != maxEntity):\n bonus = -10\n return gamestate, U_Function(gamestate.currentPlayer, gamestate.oppositePlayer, gamestate.board.size, maxEntity) + bonus\n\n # Rekurens\n if (gamestate.currentPlayer.noPlayer == maxEntity):\n # Choose the maximum utility of the state\n # Iterate all pion and its possible moves\n maxGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n maxValue = -math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Choose the best move from local search heuristic\n if (len(all_possible_moves) > 0):\n move = getBestMove(all_possible_moves, gamestate)\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n \n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimaxLocalSearch(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old max value\n if (utility > maxValue):\n maxValue = utility\n maxGameState = newGameState\n \n alpha = max(alpha, maxValue)\n if (beta <= alpha):\n return maxGameState, maxValue\n\n return maxGameState, maxValue\n\n else:\n # Choose the minimum utility of the state\n minGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n minValue = math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n if (len(all_possible_moves) > 0):\n # Choose the best move from local search heuristic\n move = getBestMove(all_possible_moves, gamestate)\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimaxLocalSearch(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old min value\n if (utility < minValue):\n minValue = utility\n minGameState = newGameState\n \n beta = min(beta, minValue)\n if (beta <= alpha):\n return minGameState, minValue\n \n return minGameState, minValue", "def findAttackRoot(root,edgesList,nodesList):\n children = findChildren(root,edgesList,nodesList)\n for node in children:\n if node[\"key\"][0] != \"S\":\n return node\n print(\"Error:: Cannot find attack root node\")\n return root", "def run_UCT(self, game, actions_taken):\n \n # If current tree is null, create one using game\n if self.root == None:\n self.root = Node(game.clone())\n # If it's not, check if there are actions from actions_taken to update\n # the tree from this 
player.\n # If the list is not empty, update the root accordingly.\n # If the list is empty, that means this player hasn't passed the turn,\n # therefore the root is already updated.\n elif len(actions_taken) != 0:\n for i in range(len(actions_taken)):\n # Check if action from history is in current root.children.\n if actions_taken[i][0] in self.root.children:\n # Check if the actions are made from the same player\n child_node = self.root.children[actions_taken[i][0]]\n if child_node.state.player_turn == actions_taken[i][1] \\\n and set(self.root.state.available_moves()) \\\n == set(game.available_moves()):\n self.root = child_node\n else:\n self.root = None\n self.root = Node(actions_taken[i][2].clone())\n else:\n self.root = None\n self.root = Node(actions_taken[i][2].clone())\n # This means the player is still playing (i.e.: didn't choose 'n').\n else:\n # Therefore, check if current root has the same children as \"game\"\n # offers. If not, reset the tree.\n if set(self.root.children) != set(game.available_moves()):\n self.root = None\n self.root = Node(game.clone())\n\n #Expand the children of the root if it is not expanded already\n if not self.root.is_expanded():\n self.expand_children(self.root)\n\n root_state = self.root.state.clone()\n\n for _ in range(self.n_simulations):\n node = self.root\n node.state = root_state.clone()\n search_path = [node]\n while node.is_expanded():\n action, new_node = self.select_child(node)\n node.action_taken = action\n search_path.append(new_node)\n node = new_node\n # At this point, a leaf was reached.\n # If it was not visited yet, then perform the rollout and\n # backpropagates the reward returned from the simulation.\n # If it has been visited, then expand its children, choose the one\n # with the highest ucb score and do a rollout from there.\n if node.n_visits == 0:\n rollout_value = self.rollout(node)\n self.backpropagate(search_path, rollout_value)\n else:\n _, terminal_state = node.state.is_finished()\n # Special case: if \"node\" is actually a leaf of the game (not \n # a leaf from the current tree), then only rollout should be \n # applied since it does not make sense to expand the children\n # of a leaf.\n if terminal_state:\n rollout_value = self.rollout(node)\n self.backpropagate(search_path, rollout_value)\n else:\n self.expand_children(node)\n action, new_node = self.select_child(node)\n node.action_taken = action\n search_path.append(new_node)\n node = new_node\n rollout_value = self.rollout(node)\n self.backpropagate(search_path, rollout_value)\n \n dist_probability = self.distribution_probability(game)\n action = self.select_action(game, self.root, dist_probability)\n q_a_root = self.root.q_a\n self.root = self.root.children[action]\n # Remove the statistics of the chosen action from the chosen child\n if self.root.n_a:\n self.root.n_a.pop(action)\n if self.root.q_a:\n self.root.q_a.pop(action)\n return action, dist_probability, q_a_root", "def test_brains_get(self):\n pass", "def arena():\r\n global monsters_defeated, arena_boss\r\n\r\n if not arena_boss:\r\n arena_boss = items_lists.random_monster('boss_monsters')\r\n print '<arena boss = ' + str(arena_boss) + '>\\n'\r\n\r\n raw_input(\"You enter a terrifyingly massive arena.\\n\")\r\n raw_input(\"Thousands of bloodthirsty fans are screaming your name.\\n\")\r\n raw_input(\"Suddenly, the doors behind you close with a slam.\\n\")\r\n raw_input(\"There's no escape.\\n\")\r\n\r\n boss_win = False\r\n arena_monsters_encountered = 0\r\n\r\n while player.get_health() > 0 and not 
boss_win:\r\n arena_monsters_encountered += 1\r\n if arena_monsters_encountered % 10:\r\n fight()\r\n else:\r\n boss_fight = fight(arena_boss)\r\n if boss_fight == 'win':\r\n boss_win = True\r\n\r\n if boss_win:\r\n raw_input(\"The dying monster's body crashes through the floor, revealing \" + \\\r\n \"a cavernous tunnel underneath the arena...\\n\")\r\n raw_input(\"The fans are getting restless, demanding another fight. You seize \" + \\\r\n \"your opportunity quickly and enter the enormous tunnel.\\n\")\r\n return 'main_tunnel'\r\n\r\n else:\r\n raw_input('You defeated ' +str(monsters_defeated) + ' monsters before dying a grisly death!\\n')\r\n return 'death'", "def custom_deeper(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n\n own_moves = game.get_legal_moves(player)\n own_score = 0\n for move in own_moves:\n newgame = game.forecast_move(move)\n newmoves = newgame.get_legal_moves(player)\n own_score = len(newmoves)\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n opp_score = 0\n for move in opp_moves:\n newgame = game.forecast_move(move)\n newmoves = newgame.get_legal_moves(game.get_opponent(player))\n opp_score = len(newmoves)\n return float(own_score - opp_score)", "def fight(fighters):\n return {}", "def utility(board):\n return utility_map[winner(board)]", "def woods_1_0():\r\n global woods_1_0_name,map_1\r\n map_1 = '| X |' + map_1[12:]\r\n try:\r\n print \"You enter \" + woods_1_0_name + \".\\n\"\r\n except:\r\n \r\n woods_1_0_name = words.woods_name()\r\n print \"You enter \" + woods_1_0_name + \".\\n\"\r\n\r\n monsta_here = round(random.random() +.3)\r\n if monsta_here:\r\n result = encounter_monster(['small_monsters','medium_monsters'])\r\n if result == 'death':\r\n return result\r\n else:\r\n raw_input(\"There's a giant colosseum in the distance...\\n\")\r\n \r\n next = (\r\n ['woods_0_0','woods_1_1','woods_1_n1','arena'],\r\n [\"West\",\"North\",\"South\",\"Arena\"]\r\n )\r\n\r\n return pick_place(next,'Where to next?')", "def load_default_game():\n global width, height, dungeon_map # DO NOT REMOVE\n width = 5\n height = 3\n dungeon_map = [list(\"&.@:=\"), list(\" \"), list(\"OYO k\")]\n return (\n 2, # player x\n 1, # player y\n '>', # player symbol\n {'+': 1}, # inventory\n 0, # werewolf x\n 1, # werewolf y\n 1, # werewolf health\n 0, # werewolf stun count\n )", "def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]" ]
[ "0.605957", "0.5823376", "0.5775074", "0.57509583", "0.57509583", "0.56359965", "0.55681175", "0.5435227", "0.5349742", "0.5342036", "0.5325338", "0.527473", "0.52361184", "0.5203507", "0.520066", "0.5183148", "0.5136757", "0.51365817", "0.5120829", "0.5114948", "0.5092223", "0.5074444", "0.50605977", "0.50585324", "0.50572264", "0.50422895", "0.5036248", "0.5033014", "0.5016951", "0.50149924" ]
0.62494785
0
Setting a troop cap should work
def test_troop_cap(self):
    self.conf["game"]["troopcap"] = 106
    self.assertEqual(self.alice.loyalists, 100)
    self.assertEqual(self.bob.loyalists, 100)
    s1 = self.battle.create_skirmish(self.alice, 50)
    s1.react(self.bob, 50, troop_type='cavalry')
    self.end_battle(self.battle, self.conf)
    # Bob wins the fight and the war
    self.assertEqual(self.battle.victor, self.bob.team)
    # Alice's 10% reward puts her under cap
    self.assertEqual(self.alice.loyalists, 105)
    # Bob's 15% reward puts him over
    self.assertEqual(self.bob.loyalists, 106)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "async def set_captain(self, cap : Player, team):\r\n self.captains[team.upper()] = cap\r\n await self.add_to_team(cap,team)\r\n self.order.append(cap)", "def set_pop_cap(self):\n v = simpledialog.askinteger(\n \"Settings\", \"Population cap:\",\n initialvalue=self.lnp.settings.popcap, parent=self.root)\n if v is not None:\n self.lnp.set_option('popcap', v)\n self.update_displays()", "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def level_cap(self, level_cap):\n\n self._level_cap = level_cap", "def AssignTopology(self, topo, cap=1.0 * Gb):\r\n self.topo = topo\r\n self.topo.SetAllCapacity(cap)\r\n # We can get and set topo info:\r\n # node_2 = self.topo.nodes[2] #2 is node id\r\n # link_3_2 = self.topo.link[3,2] #(3,2) is link id\r\n # self.topo.SetLinkCapacity((5, 7), 10.0 * Gb) #set link_5_7 with capacity 10Gbps\r", "def set_child_cap(self):\n child_split = list(self.lnp.settings.childcap.split(':'))\n child_split.append('0') # In case syntax is invalid\n v = simpledialog.askinteger(\n \"Settings\", \"Absolute cap on babies + children:\",\n initialvalue=child_split[0], parent=self.root)\n if v is not None:\n v2 = simpledialog.askinteger(\n \"Settings\", \"Max percentage of children in fort:\\n\"\n \"(lowest of the two values will be used as the cap)\",\n initialvalue=child_split[1], parent=self.root)\n if v2 is not None:\n self.lnp.set_option('childcap', str(v)+':'+str(v2))\n self.update_displays()", "def capacidad(self, capacidad: int):\n\n self._capacidad = capacidad", "def AssignTopology(self, topo, cap=1.0 * Gb):\n self.topo = topo\n self.topo.SetAllCapacity(cap)\n # We can get and set topo info:\n # node_2 = self.topo.nodes[2] #2 is node id\n # link_3_2 = self.topo.link[3,2] #(3,2) is link id\n # self.topo.SetLinkCapacity((5, 7), 10.0 * Gb) #set link_5_7 with capacity 10Gbps", "def test_change_provisioned_throughput_usual_case():", "def test_create_hyperflex_proxy_setting_policy(self):\n pass", "def __init__(self, prize_option):\n self.chaser_i = 0\n self.player_i = 1 + prize_option # prize_option=1/2/3", "def default_capabilities(self):", "async def _bail_setheist(self, ctx, cost: int):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.config.guild(guild).Theme()\r\n t_bail = theme[\"Bail\"]\r\n if cost >= 0:\r\n config[\"Bail Base\"] = cost\r\n await self.thief.config.guild(guild).Config.set(config)\r\n msg = \"Setting base {} cost to {}.\".format(t_bail, cost)\r\n else:\r\n msg = \"Need a number higher than -1.\"\r\n await ctx.send(msg)", "def caput(PV, value):\n epics.caput(PV, value)", "def _capacity_rule(self, prod_name, r, cap, m, t):\n prod = getattr(m, prod_name)\n # note that a negative capacity means a CONSUMPTION capacity instead of PRODUCTION\n if cap > 0:\n return prod[r, t] <= cap\n else:\n return prod[r, t] >= cap", "def test_operate_resource_cap_max(self, on):\n\n if on is False:\n override = {}\n else:\n override = {\"techs.test_supply_plus.constraints.resource_cap_max\": 1e6}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n if on is False:\n assert check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert np.isinf(\n m._model_data.resource_cap.loc[\"a\", 
\"test_supply_plus\"].item()\n )\n elif on is True:\n assert not check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item() == 1e6", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "def __init__(self):\n self.canchooseservicecapability = None\n \"\"\"\"the capability name\"\"\"\n self.name = None\n \"\"\"\"the capability value\"\"\"\n self.value = None", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():", "def caput(PVName, val):\n\tpv = Pv(PVName)\n\tpv.connect(timeout=10.0)\n\tpv.put(value=val, timeout=10.0)\n\tpv.disconnect()", "def set_capabilities(self, capabilities: WlSeat.capability) -> None:\n lib.wlr_seat_set_capabilities(self._ptr, capabilities)", "def test_add_capability():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n with patch.dict(dism.__grains__, {\"osversion\": 10}):\n dism.add_capability(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Add-Capability\",\n \"/CapabilityName:test\",\n \"/NoRestart\",\n ]\n )", "def test_loc_techs_resource_capacity_constraint(self, override):\n\n if override is None:\n m = build_model(\n {}, \"simple_supply_and_supply_plus,two_hours,investment_costs\"\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n assert expr.lb == 0\n assert expr.ub == np.inf\n\n else:\n m = build_model(\n {\n \"techs.test_supply_plus.constraints.resource_cap_{}\".format(\n override\n ): 10\n },\n \"simple_supply_and_supply_plus,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n if override == \"max\":\n assert expr.ub == 10\n assert expr.lb == 0\n elif override == \"equals\":\n assert expr.ub == 10\n assert expr.lb == 10\n if override == \"min\":\n assert expr.lb == 10\n assert expr.ub == np.inf", "def test_patch_bios_policy(self):\n pass", "def test_add_capability_with_extras():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n with patch.dict(dism.__grains__, {\"osversion\": 10}):\n dism.add_capability(\"test\", \"life\", True)\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Add-Capability\",\n \"/CapabilityName:test\",\n \"/Source:life\",\n \"/LimitAccess\",\n \"/NoRestart\",\n ]\n )", "def assign(self,player,hcp):\n\n # Higher hcp = higher bonus potention (max 100)\n assert hcp <= 100, 'Skill handicap cannot be >100 hcp : {0}'.format(\n hcp)\n\n if self.level is not None:\n base,bonus = RandomRoll(player,self,hcp)\n\n if base and bonus:\n self.level += random.randint(3)+1\n elif base:\n self.level += random.randint(2)", "def control_opt(self):\n\n\n if self.run_opt['refine']:\n self.run_opt['relaunch']=1\n \n #check value for 'madweight_main'\n for i in range(3,9)+[-1,-3]:\n if self.run_opt[num_to_tag[i]]==1:\n self.run_opt['madweight_main']=1\n break\n\n if self.run_opt['relaunch']==1:\n self.run_opt['control']=1", "def test_change_default_throttling_settings_http_with_overwrite_throttled():", "def test_switch_vlan_pool_default(self):\n self.assertEqual(self.options.vlan_pool, {})" ]
[ "0.66989356", "0.613216", "0.6131752", "0.56685257", "0.5661737", "0.54281896", "0.53787345", "0.5358742", "0.53274834", "0.5327376", "0.5314123", "0.53038776", "0.52905774", "0.52834314", "0.52774733", "0.5231526", "0.51952356", "0.5183654", "0.5179682", "0.51791334", "0.51530915", "0.5124855", "0.5112892", "0.5105682", "0.5103997", "0.50981945", "0.50798935", "0.507959", "0.50639826", "0.5035092" ]
0.68233424
0
Each participant can only make one toplevel skirmish
def test_single_toplevel_skirmish_each(self):
    self.battle.create_skirmish(self.alice, 1)
    with self.assertRaises(db.InProgressException):
        self.battle.create_skirmish(self.alice, 1)
    n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).
         filter_by(participant=self.alice)).count()
    self.assertEqual(n, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_rookies_toplevel(self):\n self.bob.recruited = now() + 6000\n\n # Top level\n with self.assertRaises(db.TimingException):\n self.battle.create_skirmish(self.bob, 1)\n\n self.assertEqual(self.sess.query(db.SkirmishAction).count(), 0)", "def test_disallow_betrayal(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.bob, 1, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_adds_to_overdraw_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 99)\n with self.assertRaises(db.InsufficientException):\n s1.react(self.alice, 2, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_overdraw_skirmish(self):\n with self.assertRaises(db.InsufficientException):\n self.battle.create_skirmish(self.alice, 9999999)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def subectIsSelf():", "def claim_scene(self):\n targ = self.caller.search(self.lhs)\n if not targ:\n return\n try:\n cannot_claim = bool(targ.fakename)\n except AttributeError:\n cannot_claim = True\n messagelist = list(self.scenelist) + list(self.newbies) + list(self.gms)\n err = \"\"\n if targ == self.caller or cannot_claim:\n err = \"You cannot claim '%s'.\" % self.lhs\n elif not self.rhs:\n err = \"You must include some summary of the scene. It may be quite short.\"\n elif targ in self.claimlist:\n err = \"You have already claimed a scene with %s this week.\" % self.lhs\n elif targ not in messagelist:\n err = (\n \"%s is not in your list of random scene partners this week.\" % self.lhs\n )\n if err:\n self.msg(err)\n return\n requests = targ.db.scene_requests or {}\n tup = (self.caller, self.rhs)\n name = self.caller.name\n from server.utils.arx_utils import strip_ansi\n\n name = strip_ansi(name)\n requests[name.lower()] = tup\n targ.db.scene_requests = requests\n msg = (\n \"%s has submitted a RP scene that included you, for which you have received xp. 
\"\n % name\n )\n msg += \"Validating it will grant them xp.\"\n msg += \"\\n\\nTheir summary of the scene was the following: %s\" % self.rhs\n msg += \"\\nIf you ignore this request, it will be wiped in weekly maintenance.\"\n msg += \"\\nTo validate, use {w@randomscene/validate %s{n\" % name\n msg += \"\\n{rYou are already flagged for xp, and are not penalized in any way for ignoring a request \"\n msg += \"from someone who did not meaningfully interact with you.{n\"\n targ.player_ob.inform(msg, category=\"Validate\")\n inform_staff(\n \"%s has completed this random scene with %s: %s\"\n % (self.caller.key, targ, self.rhs)\n )\n self.msg(\n \"You have sent %s a request to validate your scene: %s\"\n % (self.lhs, self.rhs)\n )\n our_requests = self.requested_validation\n our_requests.append(targ)\n self.caller.player_ob.db.requested_validation = our_requests\n if targ in self.scenelist:\n self.scenelist.remove(targ)", "def test_stop_hitting_yourself(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.alice, 1, hinder=True)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def enter_night_club(individual):\n if individual.age > LEGAL_DRINKING_AGE:\n print(\"Allowed to enter.\")\n else:\n print(\"Enterance of minors is denited.\")", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", "def test_draft_story_must_be_visible_to_owner(self):\n self.assertEqual(self.ds.is_visible_for(self.au), False)\n\n \"\"\" Draft story must not be visible for another. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u2), False)\n\n \"\"\" Draft story must be visible for story owner. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. 
\"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def on_main_action(self, e):\n if self.app.roster.IsShown():\n wx.PostEvent(self.app.roster, ev.HideRoster())\n else:\n wx.PostEvent(self.app.roster, ev.ShowRoster())", "def test_battle_skirmish_assoc(self):\n battle = self.battle\n\n s1 = battle.create_skirmish(self.alice, 1)\n s2 = battle.create_skirmish(self.bob, 1)\n\n s3 = s2.react(self.alice, 1)\n\n self.assertEqual(len(battle.skirmishes), 3)\n self.assertIn(s1, battle.skirmishes)\n self.assertIn(s2, battle.skirmishes)\n # s3 should inherit its battle from its parents\n self.assertIn(s3, battle.skirmishes)\n\n self.assertEqual(s1.battle, battle)", "def test_disallow_absent_fighting(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.region = londo\n self.sess.commit()\n\n with self.assertRaises(db.NotPresentException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def roi_slough(self):\n print(\"controller - roi_slough!\")\n self.view.processing_gui.ask_zone_type(\"Slough\")", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_ended_skirmishes_block(self):\n skirmish, _ = self.start_endable_skirmish()\n self.end_skirmish(skirmish)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def only_one_session(prefs, caps):\n rooms = tuple(caps.keys())\n # for any pair of sessions, each student can only be in one session\n return [\n [('%s_%s' % (stud,r1), False),\n ('%s_%s' % (stud,r2), False)]\n for stud in prefs \n for r1,r2 in combos(rooms)]", "def __init__(self, scenario, player_side):\n self.scenario = scenario\n self.player_side = player_side\n if scenario_id[self.scenario] == 10:\n self.assigned_f15_to_missions = False\n self.refueled = []", "def test_published_story_must_be_visible_for_everyone_but_blocked(self):\n self.assertEqual(self.ps.is_visible_for(self.au), True)\n\n \"\"\" Published story must be visible for another.\"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u2), True)\n\n \"\"\" Publsihed story must be visible for owner. \"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. 
\"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def test_ejection_after_battle(self):\n self.battle.submission_id = \"TEST\" # So update_all will work correctly\n\n old_bob_region = self.bob.region\n old_alice_region = self.alice.region\n self.battle.create_skirmish(self.alice, 5)\n\n self.end_battle()\n\n self.assertEqual(self.battle.victor, self.alice.team)\n\n self.assertNotEqual(self.bob.region, self.alice.region)\n self.assertNotEqual(self.bob.region, old_bob_region)\n self.assertEqual(self.alice.region, old_alice_region)", "def __init__(self,screen):\n\t\tsuper(LevelOne,self).__init__(screen)\n\t\tself.villain_one = None\n\t\tself._set_villain()", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def validate_scene(self):\n scene_requests = self.caller.db.scene_requests or {}\n name = self.args.lower()\n targ = scene_requests.pop(name, (None, \"\"))[0]\n self.caller.db.scene_requests = scene_requests\n if not targ:\n self.msg(\"No character by that name has sent you a request.\")\n self.view_requests()\n return\n validated = self.caller.player_ob.db.validated_list or []\n claimed = targ.player_ob.db.claimed_scenelist or []\n claimed.append(self.caller)\n targ_scenelist = targ.player_ob.db.random_scenelist or []\n if self.caller in targ_scenelist:\n targ_scenelist.remove(self.caller)\n targ.player_ob.db.random_scenelist = targ_scenelist\n targ.player_ob.db.claimed_scenelist = claimed\n self.msg(\"Validating their scene. Both of you will receive xp for it later.\")\n validated.append(targ)\n self.caller.player_ob.db.validated_list = validated\n if targ.key.lower() != name:\n self.masked_validated_list[targ] = name", "def test_privatize_hands(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n gs = g\n\n p0, p1 = gs.players\n\n latrine, insula, jack, road = cm.get_cards(['Latrine', 'Insula', 'Jack', 'Road'])\n p0.hand.set_content([latrine, insula])\n p1.hand.set_content([jack, road])\n\n gs_private = g.privatized_game_state_copy('p0')\n p0, p1 = gs_private.players\n\n self.assertIn(jack, p1.hand)\n self.assertIn(Card(-1), p1.hand)\n self.assertNotIn(road, p1.hand)\n\n self.assertIn(latrine, p0.hand)\n self.assertIn(insula, p0.hand)\n\n self.assertEqual(len(p0.hand), 2)\n self.assertEqual(len(p1.hand), 2)", "def display_lists(self):\n for ob in self.scenelist[:]:\n try:\n ob.roster.roster.refresh_from_db()\n ob.roster.refresh_from_db()\n ob.refresh_from_db()\n if ob.roster.roster.name != \"Active\":\n self.caller.player_ob.db.random_scenelist.remove(ob)\n except (AttributeError, TypeError, ValueError):\n pass\n if self.need_to_generate_lists:\n self.generate_lists()\n scenelist = self.scenelist\n claimlist = self.claimlist\n validated = self.validatedlist\n gms = self.gms\n newbies = [ob for ob in self.newbies if ob not in claimlist]\n msg = \"{w@Randomscene Information for this week:{n \"\n if \"online\" in self.switches:\n msg += \"{yOnly displaying online characters.{n\"\n scenelist = [ob for ob in scenelist if ob.show_online(self.caller.player)]\n newbies = [ob for ob in newbies if ob.show_online(self.caller.player)]\n if scenelist:\n msg += \"\\n{wRandomly generated RP partners:{n \"\n msg += list_to_string([ob.key for ob in scenelist])\n if newbies:\n msg += \"\\n{wNew players who can be also RP'd with for credit:{n \"\n msg += list_to_string([ob.key for ob in newbies])\n if gms:\n msg += \"\\n{wGMs for events here that can be claimed for 
credit:{n \"\n msg += list_to_string(gms)\n if not any((scenelist, newbies, gms)):\n msg += \"\\n{wNo players remain to be claimed.{n\"\n else:\n msg += \"\\n{yReminder: Please only /claim those you have interacted with significantly in a scene.{n\"\n if claimlist:\n msg += \"\\n{wThose you have already RP'd with:{n \"\n msg += list_to_string([ob.key for ob in claimlist])\n if validated:\n msg += \"\\n{wThose you have validated scenes for:{n \"\n masked = dict(self.masked_validated_list)\n msg += list_to_string(\n [ob.key if ob not in masked else masked[ob] for ob in validated]\n )\n if not any((scenelist, newbies, gms, claimlist, validated)):\n msg = \"No characters qualify for @randomscene information to be displayed.\"\n # random RP Tool!\n if (\n not self.caller.db.random_rp_command_this_week\n and not self.caller.db.rp_command_used\n ):\n self.generate_random_command()\n msg += (\n \"\\n|wRandomly chosen Roleplay Tool:|n %s\"\n % self.caller.db.random_rp_command_this_week\n )\n if self.caller.db.rp_command_used:\n msg += \"|y (Already used)|n\"\n self.msg(msg)", "def test_no_oppose_different_sectors(self):\n battle = self.battle\n self.bob.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.bob, 2)\n\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def no_oversuscribed_sessions(prefs, caps):\n out = []\n for room, cap in caps.items():\n # if room capacity > num students, go to next room \n # since this one is safely undersuscribed\n if cap >= len(prefs):\n continue\n # if a room contains N students, then in every group of N+1 students \n # there must be a student not in that session\n stud_combos = combos(prefs, cap+1)\n for group in stud_combos:\n out.append([('%s_%s' % (stud, room), False) for stud in group])\n return out" ]
[ "0.6052868", "0.57790726", "0.5696843", "0.5659765", "0.56102735", "0.55996853", "0.5477823", "0.544987", "0.54137665", "0.54113007", "0.5380825", "0.5371291", "0.5340069", "0.53215677", "0.53103054", "0.5306986", "0.53066045", "0.52727365", "0.52668405", "0.52482265", "0.524552", "0.5224161", "0.5162046", "0.51554483", "0.5149735", "0.5145168", "0.5136209", "0.51296866", "0.51250154", "0.5119054" ]
0.6565452
0
Each participant can only respond once to a SkirmishAction
def test_single_response_to_skirmish(self):
    s1 = self.battle.create_skirmish(self.alice, 1)
    s1.react(self.bob, 1)

    with self.assertRaises(db.InProgressException):
        s1.react(self.bob, 1)

    n = (self.sess.query(db.SkirmishAction).
         count())
    self.assertEqual(n, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_disallow_betrayal(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.bob, 1, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_adds_to_overdraw_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 99)\n with self.assertRaises(db.InsufficientException):\n s1.react(self.alice, 2, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_stop_hitting_yourself(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.alice, 1, hinder=True)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def test_battle_skirmish_assoc(self):\n battle = self.battle\n\n s1 = battle.create_skirmish(self.alice, 1)\n s2 = battle.create_skirmish(self.bob, 1)\n\n s3 = s2.react(self.alice, 1)\n\n self.assertEqual(len(battle.skirmishes), 3)\n self.assertIn(s1, battle.skirmishes)\n self.assertIn(s2, battle.skirmishes)\n # s3 should inherit its battle from its parents\n self.assertIn(s3, battle.skirmishes)\n\n self.assertEqual(s1.battle, battle)", "def claim_scene(self):\n targ = self.caller.search(self.lhs)\n if not targ:\n return\n try:\n cannot_claim = bool(targ.fakename)\n except AttributeError:\n cannot_claim = True\n messagelist = list(self.scenelist) + list(self.newbies) + list(self.gms)\n err = \"\"\n if targ == self.caller or cannot_claim:\n err = \"You cannot claim '%s'.\" % self.lhs\n elif not self.rhs:\n err = \"You must include some summary of the scene. It may be quite short.\"\n elif targ in self.claimlist:\n err = \"You have already claimed a scene with %s this week.\" % self.lhs\n elif targ not in messagelist:\n err = (\n \"%s is not in your list of random scene partners this week.\" % self.lhs\n )\n if err:\n self.msg(err)\n return\n requests = targ.db.scene_requests or {}\n tup = (self.caller, self.rhs)\n name = self.caller.name\n from server.utils.arx_utils import strip_ansi\n\n name = strip_ansi(name)\n requests[name.lower()] = tup\n targ.db.scene_requests = requests\n msg = (\n \"%s has submitted a RP scene that included you, for which you have received xp. 
\"\n % name\n )\n msg += \"Validating it will grant them xp.\"\n msg += \"\\n\\nTheir summary of the scene was the following: %s\" % self.rhs\n msg += \"\\nIf you ignore this request, it will be wiped in weekly maintenance.\"\n msg += \"\\nTo validate, use {w@randomscene/validate %s{n\" % name\n msg += \"\\n{rYou are already flagged for xp, and are not penalized in any way for ignoring a request \"\n msg += \"from someone who did not meaningfully interact with you.{n\"\n targ.player_ob.inform(msg, category=\"Validate\")\n inform_staff(\n \"%s has completed this random scene with %s: %s\"\n % (self.caller.key, targ, self.rhs)\n )\n self.msg(\n \"You have sent %s a request to validate your scene: %s\"\n % (self.lhs, self.rhs)\n )\n our_requests = self.requested_validation\n our_requests.append(targ)\n self.caller.player_ob.db.requested_validation = our_requests\n if targ in self.scenelist:\n self.scenelist.remove(targ)", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def test_post_same_user_twice(self):\n self.client.post(self.url, {\"email\": \"[email protected]\"})\n self.client.post(self.url, {\"email\": \"[email protected]\"})\n self.assertEqual(self.event.participants.count(), 1)", "def policy(self, s):\r\n if s.dealer_sum >= 16:\r\n return Action.STICK\r\n else:\r\n return Action.HIT", "def test_ended_skirmishes_block(self):\n skirmish, _ = self.start_endable_skirmish()\n self.end_skirmish(skirmish)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_overdraw_skirmish(self):\n with self.assertRaises(db.InsufficientException):\n self.battle.create_skirmish(self.alice, 9999999)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def resistant_asymptomatic_infection(self, s):\n if self.disease_status == 0:\n self.time_since_infection = 0\n s.infected.add(self.identifier)\n s.resistant.add(self.identifier)\n elif self.disease_status == 1:\n s.number_of_symptomatic -= 1\n s.resistant.add(self.identifier)\n self.disease_status = 4\n s.number_of_res_asymp += 1", "def ndemeye(self, message):\n\n try: activate(message.contact.language)\n except: activate('rw')\n\n try:\n message.reporter = Reporter.objects.filter(national_id = message.connection.contact.name )[0]\n except Exception, e:\n try: message.supervisor = Supervisor.objects.filter(email = message.connection.contact.name )[0]\n except Exception,e:\n message.respond(_(\"You need to be registered first\"))\n return True\n\n try:\n cnf = RegistrationConfirmation.objects.get(reporter = message.reporter)\n cnf.received = datetime.now()\n cnf.responded = True\n cnf.answer = True\n cnf.save()\n except Exception, e:\n print e\n if message.supervisor:\n message.respond(\"Muraho murakomeye! Ohereza ijambo 'WHO' urebeko wanditse neza, kandi wibutse abajyanamako bagomba kohereza ubutumwa kuri %s. Murakoze\" % settings.SHORTCODE) \n else: message.respond(_(\"You need to be registered first\"))\n return True \t\t\t \n\n message.respond(\"Muraho murakomeye! Mwatangira kohereza ubutumwa ku buzima bw'umubyeyi n'umwana kuri Rapidsms numero %s.\\\n Ohereza ijambo 'WHO' urebeko wanditse neza. 
Murakoze\" % settings.SHORTCODE)\n\n return True", "def test_choose_interview_slot_if_slot_is_already_taken(self):\n self.interview_slot1.student = self.student2\n self.interview_slot1.save()\n url = reverse('course_interviews:confirm_slot')\n data = {\n \"slot_id\": self.interview_slot1.id,\n \"student_uuid\": self.student1.uuid\n }\n response = self.client.post(url, data, follow=True)\n\n self.assertEqual(response.status_code, 404)", "def confirmed(self):", "def test_ejection_after_battle(self):\n self.battle.submission_id = \"TEST\" # So update_all will work correctly\n\n old_bob_region = self.bob.region\n old_alice_region = self.alice.region\n self.battle.create_skirmish(self.alice, 5)\n\n self.end_battle()\n\n self.assertEqual(self.battle.victor, self.alice.team)\n\n self.assertNotEqual(self.bob.region, self.alice.region)\n self.assertNotEqual(self.bob.region, old_bob_region)\n self.assertEqual(self.alice.region, old_alice_region)", "def asymptomatic_infection(self, s):\n if self.disease_status == 0:\n self.disease_status = 2\n self.time_since_infection = 0\n s.number_of_asymptomatic += 1\n s.infected.add(self.identifier)", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def should_pay_attention(self):\n return random.randint(1,100) > self.wander", "def test_supply_ambush(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 2)\n s2 = s1.react(self.alice, 2, hinder=False)\n s2.react(self.bob, 2, troop_type=\"cavalry\")\n\n # Alice still wins, though - the margin attack is just to stop\n # reinforcements\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)", "def surrender(self) -> Optional[str]:\n\n error_message: Optional[str] = None\n if len(self.hands[0].cards) > 2:\n error_message = \"Cannot surrender because you have already hit!\"\n\n elif len(self.hands) == 2:\n error_message = \"Cannot surrender because you have already splitted!\"\n\n else:\n self.bet //= 2\n self.hands[0]._points = 0\n\n return error_message", "def test_message_as_student(self, do_student_launch):\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def collect(self, player: Player):\n player.set_invincible(True)", "def is_eligible(self, myself):\n if self.author().screen_name == myself.screen_name:\n log_.debug(\"Not replying to my own tweets\")\n return False\n if self.is_retweet():\n log_.debug(\"Not processing pure retweets\")\n return False\n return True", "def test_must_be_subbed_to_send(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n # Create Saxony as an invite-only stream.\n self.assert_json_success(\n self.common_subscribe_to_streams(user, [\"Saxony\"], invite_only=True)\n )\n\n cordelia = self.example_user(\"cordelia\")\n with self.assertRaises(JsonableError):\n self.send_stream_message(cordelia, \"Saxony\")", "def test_list_respects_invite_only_bit(self) -> None:\n\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n\n self.common_subscribe_to_streams(user, [\"Saxony\"], invite_only=True)\n self.common_subscribe_to_streams(user, [\"Normandy\"], invite_only=False)\n result = self.api_get(user, \"/api/v1/users/me/subscriptions\")\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", response_dict)\n for sub 
in response_dict[\"subscriptions\"]:\n if sub[\"name\"] == \"Normandy\":\n self.assertEqual(\n sub[\"invite_only\"], False, \"Normandy was mistakenly marked private\"\n )\n if sub[\"name\"] == \"Saxony\":\n self.assertEqual(sub[\"invite_only\"], True, \"Saxony was not properly marked private\")", "def make_eligible(self):\n pass", "def block_one(self):", "def hesitant_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps - np.random.randint(1,5)\n else:\n if self.opponenet.cashed:\n self.EV = self.opponenet.pumps + 1\n elif self.opponenet.popped:\n if not self.stopCount:\n if self.agent.pumps == 0:\n self.EV = np.random.randint(1,10)\n else:\n self.EV = self.agent.pumps\n self.stopCount = True\n self.action_gating()" ]
[ "0.6221245", "0.60473037", "0.6030545", "0.5727721", "0.5595633", "0.5559653", "0.5541494", "0.5517209", "0.5507739", "0.5506363", "0.5452872", "0.5406337", "0.5401216", "0.53752905", "0.53640044", "0.5348013", "0.5331567", "0.5330819", "0.5329682", "0.5308434", "0.5278392", "0.52746105", "0.52621746", "0.52565706", "0.52383447", "0.5238167", "0.52338433", "0.52208114", "0.5202918", "0.5202127" ]
0.65196186
0
Can't start a skirmish with more loyalists than you have
def test_no_overdraw_skirmish(self):
    with self.assertRaises(db.InsufficientException):
        self.battle.create_skirmish(self.alice, 9999999)

    n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).
         filter_by(participant=self.alice)).count()
    self.assertEqual(n, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_adds_to_overdraw_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 99)\n with self.assertRaises(db.InsufficientException):\n s1.react(self.alice, 2, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_overkill(self):\n s1 = self.battle.create_skirmish(self.alice, 10) # Attack 10\n s1.react(self.carol, 10, hinder=False) # Right amount = ok\n\n with self.assertRaises(db.TooManyException):\n s1.react(self.bob, 11)", "def test_stop_hitting_yourself(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.alice, 1, hinder=True)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_ended_skirmishes_block(self):\n skirmish, _ = self.start_endable_skirmish()\n self.end_skirmish(skirmish)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_disallow_fighting_retreat(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.move(100, londo, 60 * 60 * 24)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)", "def test_no_rookies_toplevel(self):\n self.bob.recruited = now() + 6000\n\n # Top level\n with self.assertRaises(db.TimingException):\n self.battle.create_skirmish(self.bob, 1)\n\n self.assertEqual(self.sess.query(db.SkirmishAction).count(), 0)", "def test_disallow_betrayal(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.bob, 1, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_single_response_to_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n s1.react(self.bob, 1)\n\n with self.assertRaises(db.InProgressException):\n s1.react(self.bob, 1)\n\n n = (self.sess.query(db.SkirmishAction).\n count())\n self.assertEqual(n, 2)", "def test_no_reply_to_expired_skirmish(self):\n s1, s2 = self.start_endable_skirmish()\n self.end_skirmish(s1)\n with self.assertRaises(db.TimingException):\n s1.react(self.dave, 1)\n\n # Make sure the non-root nodes also don't allow it\n with self.assertRaises(db.TimingException):\n s2.react(self.carol, 1)", "def event1923():\n header(1923)\n end_if_this_event_on()\n if_player_has_special_effect(0, SPEFFECT.ExileSoulEffect)\n item.award_item_to_host_only(ITEMLOT.ExileSoulReward)", "def test_disallow_absent_fighting(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.region = londo\n self.sess.commit()\n\n with self.assertRaises(db.NotPresentException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_bad_attack_types(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10 infantry\n s1.react(self.bob, 10, troop_type='ranged') # --Attack 10 ranged\n\n # Ranged should get a 50% penalty here, for a total of 10/2 = 5\n # So Alice should win by 5 despite lesser numbers\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, 
self.alice.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)\n\n s2 = battle.create_skirmish(self.bob, 10, # attack 10 ranged\n troop_type='ranged')\n s2.react(self.alice, 10, troop_type='cavalry') # -- oppose 10 cavalry\n result = s2.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)\n\n s3 = battle.create_skirmish(self.carol, 10, # Attack 10 cavalry\n troop_type='cavalry')\n s3.react(self.bob, 10) # -- oppose 10 infantry\n result = s3.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.carol.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)", "def test_no_early_fights(self):\n self.battle.begins = now() + 60 * 60 * 12\n\n self.assertFalse(self.battle.is_ready())\n self.assertFalse(self.battle.has_started())\n\n with self.assertRaises(db.TimingException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_guider_start_ffsOpen(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n self._guider_start(5, 17, 0, 0)", "def test_single_toplevel_skirmish_each(self):\n self.battle.create_skirmish(self.alice, 1)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def enough_players():\n return True", "def test_disallow_retreat(self):\n self.battle.create_skirmish(self.alice, 1)\n londo = self.get_region(\"Orange Londo\")\n\n with self.assertRaises(db.InProgressException):\n self.alice.move(100, londo, 0)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 0)", "def test_no_decommit_after_skirmishes(self):\n skirmish, _ = self.start_endable_skirmish(alice_forces=5, bob_forces=5)\n self.end_skirmish(skirmish)\n\n self.assertEqual(5, self.alice.committed_loyalists)", "async def train_overlord(self):\n if self.supply_left < 3 and not self.already_pending(UnitTypeId.OVERLORD):\n self.train(UnitTypeId.OVERLORD)", "def test_cant_start_fight_in_sector_zero(self):\n battle = self.battle\n self.alice.sector = 0\n\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.NoSuchSectorException):\n battle.create_skirmish(self.alice, 2)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "async def _release_heist(self, ctx):\r\n author = ctx.message.author\r\n guild = ctx.guild\r\n player_time = await self.thief.get_member_timeserved(author)\r\n base_time = await self.thief.get_member_sentence(author)\r\n oob = await self.thief.get_member_oob(author)\r\n\r\n # Theme variables\r\n theme = await self.thief.get_guild_theme(guild)\r\n t_jail = theme[\"Jail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if await self.thief.get_member_status(author) != \"Apprehended\" or oob:\r\n await ctx.send(\"I can't remove you from {0} if you're not \"\r\n \"*in* {0}.\".format(t_jail))\r\n return\r\n\r\n remaining = self.thief.cooldown_calculator(player_time, base_time)\r\n if remaining != \"No Cooldown\":\r\n await ctx.send(\"You still have time on your {}. 
You still need to wait:\\n\"\r\n \"```{}```\".format(t_sentence, remaining))\r\n return\r\n\r\n msg = \"You served your time. Enjoy the fresh air of freedom while you can.\"\r\n\r\n if oob:\r\n msg = \"You are no longer on probation! 3x penalty removed.\"\r\n await self.thief.set_member_oob(author, False)\r\n\r\n await self.thief.set_member_sentence(author, 0)\r\n await self.thief.set_member_timeserved(author, 0)\r\n await self.thief.set_member_free(author)\r\n\r\n await ctx.send(msg)", "async def _play_heist(self, ctx):\r\n author = ctx.message.author\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.get_guild_theme(guild)\r\n crew = await self.thief.config.guild(guild).Crew()\r\n\r\n await self.thief.check_server_settings(guild)\r\n await self.thief.check_member_settings(author)\r\n\r\n cost = config[\"Cost\"]\r\n wait_time = config[\"Wait\"]\r\n prefix = ctx.prefix\r\n\r\n # Theme Variables\r\n t_crew = theme[\"Crew\"]\r\n t_heist = theme[\"Heist\"]\r\n t_vault = theme[\"Vault\"]\r\n\r\n outcome, msg = await self.thief.requirement_check(prefix, author, cost)\r\n\r\n if outcome == \"Failed\":\r\n return await ctx.send(msg)\r\n\r\n if not config[\"Planned\"]:\r\n await bank.withdraw_credits(author, cost)\r\n config[\"Planned\"] = True\r\n await self.thief.config.guild(guild).Config.set(config)\r\n crew = await self.thief.add_crew_member(author)\r\n await ctx.send(\"A {4} is being planned by {0}\\nThe {4} \"\r\n \"will begin in {1} seconds. Type {2}heist play to join their \"\r\n \"{3}.\".format(author.name, wait_time, ctx.prefix, t_crew, t_heist))\r\n await asyncio.sleep(wait_time)\r\n \r\n crew = await self.thief.config.guild(guild).Crew()\r\n\r\n if len(crew) <= 1:\r\n await ctx.send(\"You tried to rally a {}, but no one wanted to follow you. 
The \"\r\n \"{} has been cancelled.\".format(t_crew, t_heist))\r\n await self.thief.reset_heist(guild)\r\n else:\r\n await self.heist_game(ctx, guild, t_heist, t_crew, t_vault)\r\n\r\n else:\r\n await bank.withdraw_credits(author, cost)\r\n crew = await self.thief.add_crew_member(author)\r\n crew_size = len(crew)\r\n await ctx.send(\"{0} has joined the {2}.\\nThe {2} now has {1} \"\r\n \"members.\".format(author.display_name, crew_size, t_crew))", "def hire(name):\r\n print(\"A CEO cannot be hired outright\")", "def lysis(self) :\n self.kill()\n return True", "def healthcare():", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def test_no_support_different_sectors(self):\n battle = self.battle\n self.carol.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.carol, 2, hinder=False)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_skirmish_end(self):\n s1, s2 = self.start_endable_skirmish()\n\n self.assertTrue(s1.ends)\n self.assertFalse(s1.is_resolved())\n\n # Go through one round of battle updating to verify skirmish\n # doesn't end early\n sess = self.sess\n db.Battle.update_all(sess)\n\n self.assertTrue(s1.ends)\n self.assertFalse(s1.is_resolved())\n self.assertFalse(s2.is_resolved())\n\n # Force skirmish end\n self.end_skirmish(s1)\n\n # Skirmish should have been resolved\n self.assertTrue(s1.is_resolved())\n self.assertTrue(s2.is_resolved())\n\n # With alice as the victor\n self.assertEqual(s1.victor, self.alice.team)", "def test_supply_ambush(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 2)\n s2 = s1.react(self.alice, 2, hinder=False)\n s2.react(self.bob, 2, troop_type=\"cavalry\")\n\n # Alice still wins, though - the margin attack is just to stop\n # reinforcements\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)" ]
[ "0.597504", "0.5931134", "0.59167415", "0.5845247", "0.56866354", "0.5659165", "0.5635481", "0.5633224", "0.5616283", "0.5575347", "0.55748", "0.55045563", "0.5464928", "0.54558235", "0.5416244", "0.54081935", "0.5394443", "0.53809035", "0.5369932", "0.5360778", "0.53253394", "0.53121024", "0.5298908", "0.52857375", "0.5270852", "0.52545035", "0.52537745", "0.5249432", "0.5224793", "0.5185797" ]
0.6116105
0
But you can move away if it's to another sector in the same region
def test_allow_sector_movement_in_battle(self):
    self.conf["game"]["num_sectors"] = 7
    self.conf["game"]["allow_sector_retreat"] = True
    self.battle.create_skirmish(self.alice, 1)

    curr = self.alice.region
    self.alice.move(100, curr, 15, sector=3, conf=self.conf)

    n = (self.sess.query(db.MarchingOrder).
         filter_by(leader=self.alice)).count()
    self.assertEqual(n, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_cytokine(self) :\n for cy in self.cytokine :\n if self.cytosol.get(cy, False) and self.cytosol[cy] > 0:\n cy(self.pos, tool.rand_2D(1.2))\n self.cytosol[cy] -= 1\n else : \n self.ribosome.mrna(cy, 100)\n self.cytokine.clear()", "def spinAround(self):", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_to_position2(self):", "def reenter(self, pt):\n if self.idx < 10:\n angle = 0\n else:\n angle = -50\n self.lock = 3\n self.open_gripper(-15)\n time.sleep(2)\n frame = get_frame(np.ravel(self.get_current_cartesian_position().position) + np.array([0,0.018,0.01]), 0)\n # self.move_cartesian_frame_linear_interpolation(frame, 0.1)\n time.sleep(2)\n self.home()\n time.sleep(1)\n self.gripper.reset()\n pt = np.array(pt)\n pt[0] -= 0.00\n pt[2] += 0.0005\n print pt\n notch.cut_notch_angle(pt, self, angle)\n print \"WAT\"\n time.sleep(2)\n # self.gripper.execute_action((0, 0, 2))\n frame = tfx.pose(np.ravel(self.get_current_cartesian_position().position) + np.array([0,0,0.005]), np.array(self.get_current_cartesian_position().orientation))\n # self.move_cartesian_frame_linear_interpolation(frame, 0.1)\n # time.sleep(2)\n # frame = get_frame(np.ravel(self.get_current_cartesian_position().position), -50)\n # self.move_cartesian_frame_linear_interpolation(frame, 0.04)\n time.sleep(2)\n self.open_gripper(1)\n time.sleep(2)\n self.open_gripper(75)\n time.sleep(2)\n return", "def move(self, new_home):\n #checked#\n ###your code here###\n if self.home!=None:\n self.home.occupant=None\n new_home.occupant=self\n self.home=new_home", "def _walk(self):\n \n newpos= self.rect.move((self.move, 0)) # x方向移動 .move, y方向不動。\n \n # 偵測碰撞左右牆壁,並處理(反彈)\n if not self.area.contains(newpos):\n if self.rect.left < self.area.left or \\\n self.rect.right > self.area.right:\n self.move = -self.move\n newpos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(self.image, 1, 0)\n self.rect = newpos", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #provjere da li su kraljevi ili topovi inicirali pomijeranje\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #provjera da li je neko pojeo topove\n 
if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def crowned(self): # called when this piece has become a 'King'\r\n \r\n self.isKing = True", "def move_to_position1(self):", "def return_to_center(): #ignore this for now, use move_to_position_(0,0)\n current_pos = '\\xAA\\xBB\\xCC\\xDD'\n #run command until back to center (0,0)\n while True: #change the byte locations\n current_pos = to_center()\n print(current_pos)\n time.sleep(0.2) #check timing\n if((current_pos[1] == 0) and (current_pos[1] == 0)):\n break\n print('At center')", "def test_moves_to_walkable_area_in_the_north(self):\n picture = \"\"\"\n ----------- level z=0 :\n ###\n # #\n # #\n ###\n -----------\n \"\"\"\n (state, actions) = self.get_brain_decisions(\n picture, entrance_point=Point(1, 2, 0), my_position=Point(1, 2, 0))\n\n assert_that(actions).is_length(1)\n action = actions.pop()\n assert_that(action.action_code).is_equal_to(ActionCode.MOVE)\n assert_that(action.direction).is_equal_to(Direction.NORTH)", "def interaction_hole(self) -> None:\n x_dead_char = self.moving_character.x_obj\n y_dead_char = self.moving_character.y_obj\n void = ob.Void(x_dead_char, y_dead_char)\n # Replacing character by a Void\n self.grid.obj_list[self.moving_character] = void\n del self.grid.character_list[self.index_character]\n self.grid.character_just_died = True", "def move2(self):\n\n options = self.location.exits.keys()\n for key in options:\n if self.location.exits[key] == p.location:\n self.location.objects.remove(a)\n self.location = p.location\n self.location.objects.append(a)\n print('fred entered the room')\n self.attack(['attack', str(p.name)])\n break\n else:\n self.move1()", "def minusToHome():\n\tif (not checkMotorsInPosition(-134.76, -34.197)):\n\t\treturn\n\n\tmoveMotor(dktheta, 0)\n\tmoveMotor(dkappa, 0)\n\tsimpleLog(\"Done\")", "def unmakeMove(self, move):", "def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos", "def lock_in_soda_can(self):\n move_msg = Move()\n move_msg.lane = director.coldring\n move_msg.location = 1 # 1: fully down\n self.move_pub.publish(move_msg)", "def MoveCurrentSpace(self):\n if self.facing == 0:\n self.y -= 1\n elif self.facing == 1:\n self.x += 1\n elif self.facing == 2:\n self.y += 1\n elif self.facing == 3:\n self.x -= 1", "def move_to_refine(self, des_img_pos, act_img_pos, current_world_pos, increment, img_thresh):\n des_img_x = des_img_pos[0]\n des_img_y = des_img_pos[1]\n act_img_x = act_img_pos[0]\n act_img_y = act_img_pos[1]\n cur_wld_x = current_world_pos[0]\n cur_wld_y = current_world_pos[1]\n new_wld_x = cur_wld_x\n new_wld_y = cur_wld_y\n \n #object to the left -> move left (-wld_y)\n if (act_img_x < des_img_x-img_thresh):\n print(' Moving left')\n new_wld_y = cur_wld_y + increment\n #object to the right -> move right (+wld_y)\n elif (act_img_x > des_img_x+img_thresh):\n new_wld_y = cur_wld_y - increment\n print(' Moving right')\n #object to the 
top -> move forward (+wld_x)\n if (act_img_y < des_img_y-img_thresh):\n new_wld_x = cur_wld_x + increment\n print(' Moving forward')\n #object to the bottom -> move backward (-wld_x)\n elif (act_img_y > des_img_y+img_thresh):\n new_wld_x = cur_wld_x - increment\n print(' Moving backward')\n \n #move arm to new coordinates\n self.move_to(new_wld_x, new_wld_y, self.move_to_height)\n \n #return new arm position\n return [new_wld_x, new_wld_y]", "def _ispinnedmove(self, from_, to_):\n return False", "def enter_parking_lot(self):\n\n self.start_driving()\n time.sleep(2)\n\n # drive back into gap with strong angle\n self.angle = 25\n self.velocity = -8\n self.drive_thread.driven_distance = 0\n self.distance = 35\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(1)\n\n # drive back until close to wall\n self.angle = 0\n self.velocity = -8\n self.distance = 150\n self.drive_thread.driven_distance = 0\n while self.sensor_manager.rear > 60:\n time.sleep(0.2)\n \n # get into straight position\n self.angle = -25\n self.velocity = -8\n self.distance = 40\n self.drive_thread.driven_distance = 0\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(1)\n \n # drive backwards up to end of gap\n self.angle = 0\n self.velocity = -8\n self.drive_thread.driven_distance = 0\n while self.sensor_manager.rear >= 10:\n print(self.sensor_manager.rear)\n time.sleep(0.5)\n \n self.stop_driving()", "def king_adjust(self, turn):\n\n opposite_turn = next_turn(turn)\n\n original_location_index = (piece_class.KING_LOCATION[turn][0] + piece_class.KING_LOCATION[turn][1] * 8)\n \n# if self.board[original_location_index] == self.empty:\n# print(\"yo\")\n \n self.board[original_location_index].possible_moves = [i for i in self.board[original_location_index].possible_moves if i not in self.long_dict[opposite_turn]]\n \n\n\n #king_path = [i for i in self.board[original_location_index].possible_moves if i not in self.path_dict[opposite_turn]]\n\n #removes moving into check from king path. Can remove this functionality from the move method now.\n #self.board[original_location_index].possible_moves = king_path\n #king_path_index = [(i[0] + i[1]*8) for i in self.board[original_location_index].possible_moves]\n\n \n #for i in king_path:\n # if i in self.long_dict[opposite_turn] or self.check_dict[opposite_turn]:\n # print(\"king path is: \", king_path)\n # king_path.remove(i)\n # print(\"king path is now: \", king_path)\n \n\n\n\n\n #for i in king_path_index:\n # enemy_piece = self.board[i]\n # self.board[i] = self.board[original_location_index]\n # self.board[original_location_index] = self.empty\n # self.loads_pathways(turn)\n # if self.coords[i] in self.path_dict[opposite_turn]:\n # print(\"yo\")\n \n #self.board[original_location_index] = self.board[i]\n #self.board[i] = enemy_piece", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def undo_king_piece(self):\n self.king = False\n if self.symbol == '%':\n self.symbol = 'X'\n else:\n self.symbol = 'O'", "def climb(self):\n print(\"Inside WoodElf.climb\")", "def left_twist(self):\n self.turn_by_deg(-179)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(-179)\n #time.sleep(.1)\n self.stop()" ]
[ "0.5858422", "0.5820193", "0.5819938", "0.5819938", "0.58107525", "0.5809525", "0.5758881", "0.5746851", "0.5674281", "0.5662257", "0.56605774", "0.56362796", "0.5608948", "0.5545998", "0.5540973", "0.55380243", "0.5533503", "0.5502532", "0.54935884", "0.5492089", "0.54900664", "0.54527175", "0.5448388", "0.54460496", "0.5437851", "0.54262036", "0.54252714", "0.54205763", "0.54128766", "0.5408835" ]
0.6191313
0
Still can't spearhead a skirmish even if your last skirmish is done
def test_ended_skirmishes_block(self):
    skirmish, _ = self.start_endable_skirmish()
    self.end_skirmish(skirmish)

    with self.assertRaises(db.InProgressException):
        self.battle.create_skirmish(self.alice, 1)

    n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).
         filter_by(participant=self.alice)).count()
    self.assertEqual(n, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_skirmish_end(self):\n s1, s2 = self.start_endable_skirmish()\n\n self.assertTrue(s1.ends)\n self.assertFalse(s1.is_resolved())\n\n # Go through one round of battle updating to verify skirmish\n # doesn't end early\n sess = self.sess\n db.Battle.update_all(sess)\n\n self.assertTrue(s1.ends)\n self.assertFalse(s1.is_resolved())\n self.assertFalse(s2.is_resolved())\n\n # Force skirmish end\n self.end_skirmish(s1)\n\n # Skirmish should have been resolved\n self.assertTrue(s1.is_resolved())\n self.assertTrue(s2.is_resolved())\n\n # With alice as the victor\n self.assertEqual(s1.victor, self.alice.team)", "def test_no_overdraw_skirmish(self):\n with self.assertRaises(db.InsufficientException):\n self.battle.create_skirmish(self.alice, 9999999)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_no_adds_to_overdraw_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 99)\n with self.assertRaises(db.InsufficientException):\n s1.react(self.alice, 2, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_decommit_after_skirmishes(self):\n skirmish, _ = self.start_endable_skirmish(alice_forces=5, bob_forces=5)\n self.end_skirmish(skirmish)\n\n self.assertEqual(5, self.alice.committed_loyalists)", "def test_single_response_to_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n s1.react(self.bob, 1)\n\n with self.assertRaises(db.InProgressException):\n s1.react(self.bob, 1)\n\n n = (self.sess.query(db.SkirmishAction).\n count())\n self.assertEqual(n, 2)", "def dance(self):\n if not self.safe_to_dance():\n return False #shutdown\n for x in range(4): \n self.shuffle()\n self.skipp()\n self.spin_dizzy()\n self.for_back()\n self.break_neck()\n self.swiggly()\n self.break_neck()\n self.backward_shimmey()", "def first_strike(self, context, skirm):\n fftb = context.config['game'].get('fftb_time', 0)\n if fftb:\n pre = (context.session.query(SkirmishAction).\n filter_by(battle=skirm.battle,\n participant=skirm.participant)).count()\n cutoff = skirm.battle.begins + fftb\n if now() <= cutoff and pre <= 1:\n return True", "def battle_resting(self):\n pass", "def shar():\n pass", "def test_no_reply_to_expired_skirmish(self):\n s1, s2 = self.start_endable_skirmish()\n self.end_skirmish(s1)\n with self.assertRaises(db.TimingException):\n s1.react(self.dave, 1)\n\n # Make sure the non-root nodes also don't allow it\n with self.assertRaises(db.TimingException):\n s2.react(self.carol, 1)", "def test_stop_hitting_yourself(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.alice, 1, hinder=True)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def disarm(self):\n pass", "def test_disallow_fighting_retreat(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.move(100, londo, 60 * 60 * 24)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)", "def test_ejection_after_battle(self):\n self.battle.submission_id = 
\"TEST\" # So update_all will work correctly\n\n old_bob_region = self.bob.region\n old_alice_region = self.alice.region\n self.battle.create_skirmish(self.alice, 5)\n\n self.end_battle()\n\n self.assertEqual(self.battle.victor, self.alice.team)\n\n self.assertNotEqual(self.bob.region, self.alice.region)\n self.assertNotEqual(self.bob.region, old_bob_region)\n self.assertEqual(self.alice.region, old_alice_region)", "def smarter():\r\n pass", "def _stop_attack(self):\n self._add_malicious_blocks_to_honest_dag()\n self._competing_chain_tip_gid = None\n self._first_parallel_block_gid = None", "def main():\n deli = troll_fight()\n if not empty_stack(deli):\n pick_berries(deli)\n else:\n print(\"The Troll has defeated the Goats! /sadface\")", "def test_disallow_retreat(self):\n self.battle.create_skirmish(self.alice, 1)\n londo = self.get_region(\"Orange Londo\")\n\n with self.assertRaises(db.InProgressException):\n self.alice.move(100, londo, 0)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 0)", "def smelt(self):\n # Move to the furnace, if already there, nothing happens\n self.move.go_to_furnace()\n\n # Find the ore in the player's backpack\n ore = self.player.backpack.get_item('ore')\n if not ore:\n utils.log(\"INFO\", \"No ore remain, switching task to forging\")\n return self.player.TASKS.FORGE\n\n # Fire the smelter if it is cold\n self.fire_smelter()\n\n # Smelt the ore\n self.player.backpack.use_item('ore', (176, 161), (8, 6))\n\n # Continue smelting\n return self.player.TASKS.SMELT", "def event11515495():\n header(11515495)\n\n if_event_flag_on(0, EVENT.DarkSmoughIsSupport)\n\n wait_random_seconds(12, 17) # Time between intermittent one-off Smough attacks.\n\n end_if_event_flag_on(EVENT.DarkOrnsteinAndSmoughPhaseTwoStarted)\n\n flag.disable_chunk(11515470, 11515479)\n if_entity_health_less_than_or_equal(1, CHR.DarkOrnsteinGiant, 0.25)\n skip_if_condition_false(2, 1)\n flag.enable_random_in_chunk(11515470, 11515478) # Maybe butt slam (3008).\n skip(1)\n flag.enable_random_in_chunk(11515470, 11515477) # No butt slam.\n\n restart()", "def test_no_early_fights(self):\n self.battle.begins = now() + 60 * 60 * 12\n\n self.assertFalse(self.battle.is_ready())\n self.assertFalse(self.battle.has_started())\n\n with self.assertRaises(db.TimingException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_disallow_betrayal(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.bob, 1, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_single_toplevel_skirmish_each(self):\n self.battle.create_skirmish(self.alice, 1)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def test_buff_first_strike_support(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 20) # Attack 20 infantry\n s2 = s1.react(self.bob, 10) # -- oppose 10 infantry\n s3 = s2.react(self.dave, 9, hinder=False) # ---- support 9\n\n s3.buff_with(db.Buff.first_strike())\n\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, 
self.bob.team)\n self.assertEqual(result.margin, 1)\n self.assertEqual(result.vp, 20)", "async def play_sniper_rifle(game_state) -> None:\n shelter = game_state.active_player\n if len(game_state.city_deck) > 0:\n top_card = game_state.city_deck[0]\n if top_card.top != ZombieType.SURVIVOR:\n message = f'There is {top_card.top.value} in the city. Should the survivors shoot it[y/n]?\\n>'\n action = await get_action(game_state, message, ['y', 'n'])\n if action == 'y':\n shelter.print(f'One of survivors killed {top_card.top.value} with {Supply.SNIPER.value}!')\n shelter.print('City is safe now!')\n game_state.city_graveyard.append(game_state.get_city_card())\n put_supplies_on_graveyard(game_state, Supply.SNIPER)\n return\n\n big_inside, lesser_counter = count_zombies(game_state)\n\n if big_inside and lesser_counter == 0:\n play_weapon(game_state, Supply.SNIPER, strong=True)\n elif lesser_counter >= 0 and not big_inside:\n play_weapon(game_state, Supply.SNIPER)\n else:\n message = 'What survivors should do[0/1]?\\n[0]: kill big zombie\\n[1]: kill lesser zombie\\n>'\n action = await get_action(game_state, message, ['0', '1'])\n if action == '0':\n play_weapon(game_state, Supply.SNIPER, strong=True)\n else:\n play_weapon(game_state, Supply.SNIPER)", "def test_no_overkill(self):\n s1 = self.battle.create_skirmish(self.alice, 10) # Attack 10\n s1.react(self.carol, 10, hinder=False) # Right amount = ok\n\n with self.assertRaises(db.TooManyException):\n s1.react(self.bob, 11)", "def test_cant_start_fight_in_sector_zero(self):\n battle = self.battle\n self.alice.sector = 0\n\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.NoSuchSectorException):\n battle.create_skirmish(self.alice, 2)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_ties_resolve_correctly(self):\n skirmish, _ = self.start_endable_skirmish(alice_forces=1, bob_forces=1)\n self.assertFalse(skirmish.is_resolved())\n self.end_skirmish(skirmish)\n\n self.assertTrue(skirmish.is_resolved())" ]
[ "0.62651765", "0.6077142", "0.60739297", "0.60575885", "0.60405356", "0.6004787", "0.5990118", "0.5935853", "0.5932327", "0.5794377", "0.57839346", "0.5757753", "0.57397354", "0.5735527", "0.56593466", "0.5652513", "0.56437516", "0.56142443", "0.5613523", "0.5601323", "0.5598583", "0.55611175", "0.5548461", "0.55347663", "0.5528156", "0.5500944", "0.54926664", "0.5478689", "0.5476022", "0.5474088" ]
0.6307598
0
Ties still count as resolved
def test_ties_resolve_correctly(self):
    skirmish, _ = self.start_endable_skirmish(alice_forces=1, bob_forces=1)
    self.assertFalse(skirmish.is_resolved())
    self.end_skirmish(skirmish)

    self.assertTrue(skirmish.is_resolved())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_ties(self,rank):\n for k in self._run:\n curr_dict = defaultdict(list)\n qid = self._run[k][0].get_qid()\n for t in self._run[k]:\n # print t.get_str()\n curr_dict[t.get_score()].append(t)\n curr_dict = OrderedDict(sorted(curr_dict.iteritems(),reverse=True))\n cnt = 0\n for score in curr_dict:\n cnt = cnt + 1\n if len(curr_dict[score]) > 1 and cnt<rank:\n self._ties[qid] = 1", "def test_lookup_all_pending(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Sanity check that ALPHA slots are full.\n self.assertEqual(self.node.send_find.call_count, ALPHA)\n self.assertEqual(len(lookup.pending_requests), ALPHA)\n self.assertEqual(len(lookup.contacted), ALPHA)\n self.assertEqual(len(lookup.shortlist), K)\n # Re-run _lookup and ensure no further network calls have been made.\n lookup._lookup()\n self.assertEqual(self.node.send_find.call_count, ALPHA)", "def resolve_successful(self):\n\t\traise NotImplementedError()", "def test_complex_resolve_cancel(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10\n s2 = s1.react(self.alice, 1, hinder=False) # --Support 1\n s2.react(self.bob, 10) # ----Attack 10\n s3 = s1.react(self.bob, 10) # --Attack 10\n s3.react(self.alice, 10) # ----Attack 10\n\n # Make sure the leaves cancel correctly\n s2result = s2.resolve()\n self.assert_(s2result)\n self.assertEqual(s2result.victor, self.bob.team)\n\n s3result = s3.resolve()\n self.assert_(s3result)\n self.assertEqual(s3result.victor, None)\n\n # All the supports and attacks cancel each other out, winner should\n # be alice by 10 (from original attack)\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 10)\n # 20 vp because she's unopposed\n self.assertEqual(result.vp, 20)", "def _make_ties(self) -> None:\n\n # get all hint spaces with adjacent '?'s\n frontier = {neighbor: self._lookup[neighbor] for pos, space in self._unknowns.items() for neighbor in\n space.neighbors.values() if neighbor and self._lookup[neighbor].hint.isnumeric()}\n\n # use hints to create \"zones\" of '?'-squares along the frontier,\n # detailing the # of mines left to find in each zone.\n for pos, space in frontier.items():\n local_unknowns = {coord for coord in space.neighbors.values() if coord in self._unknowns}\n for unknown in local_unknowns:\n key = frozenset(local_unknowns)\n self._lookup[unknown].zones[key] = self._lookup[unknown].zones.setdefault(key, space.num_undiscovered)\n self._lookup[unknown].zones[key] = min(space.num_undiscovered, self._lookup[unknown].zones[key])\n self._lookup[unknown].ties |= local_unknowns - {unknown}\n self._remaining_zones.update(self._lookup[unknown].zones)\n\n # split overlapping zones into components\n for unknown in self._unknowns.values():\n for zone, num_undiscovered in list(unknown.zones.items()):\n if zone not in unknown.zones:\n continue\n for other_zone, other_num_undiscovered in list(unknown.zones.items()):\n if other_zone in unknown.zones:\n shared = zone & other_zone\n\n if zone < other_zone or (shared and other_num_undiscovered > num_undiscovered):\n # if \"zone\" & \"other_zone\" share members then\n # it is possible to split the zone w/ the higher # of mines\n # into components, \"shared\" & \"not_shared\".\n\n # unknown.zones.pop(other_zone)\n\n not_shared = other_zone - shared\n unknown.zones[not_shared] = other_num_undiscovered - num_undiscovered\n else:\n print(end='')\n return", "def comply(self, counts):\n pass", "def 
test_ResolvedPackage(self):\n packages = Package.objects.filter(version='2.6.1-1')\n vulnerabilities = Vulnerability.objects.all()\n\n resolved_pkgs_count = ResolvedPackage.objects.count()\n expected_count = packages.count() * vulnerabilities.count()\n\n self.assertEqual(expected_count, resolved_pkgs_count)\n\n for pkg in packages:\n for vuln in vulnerabilities:\n self.assertTrue(ResolvedPackage.objects.filter(\n package=pkg,\n vulnerability=vuln,\n ))", "def number_bites_resolved(self) -> int:\r\n resolved_bites = {\r\n row['bite']\r\n for row in self.rows\r\n if row['completed'] == 'True'\r\n }\r\n\r\n return len(resolved_bites)", "def step(self):\n highest_offer = None\n\n if self.manager is None:\n highest_rep = 0\n\n else:\n highest_rep = self.manager.reputation\n\n for offer in self.offers:\n if offer.manager.reputation > highest_rep:\n highest_offer = offer\n\n if highest_offer is not None:\n highest_offer.accept()\n\n self.offers = []", "def resolve(self):\n\t\tself.player.money -= self.get_cost()\n\t\tself.player.save()\n\n\t\tresult = self.is_successful()\n\t\tif result:\n\t\t\tself.resolve_successful()\n\t\telse:\n\t\t\tself.resolve_failure()\n\t\treturn result", "def resolve(self):\n pass # pragma: no cover", "def test_own_count(self):\n self._test_count_func(it_count)", "def resolve_failure(self):\n\t\tpass", "def resolving(self):\n\treturn self.__resolving", "def get_close(self):\n pool = set()\n\n for f in SUCC_FINGERS:\n pool.update(self.best_finger_succ[f])\n for f in PRED_FINGERS:\n pool.update(self.best_finger_pred[f])\n\n return list(pool)", "def test_lookup_some_pending_some_contacted(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Reset in order to manually create the correct state.\n lookup.pending_requests = {}\n lookup.contacted = set()\n self.node.send_find.call_count = 0\n\n # Add a single pending request.\n pending_uuid = str(uuid.uuid4())\n pending_future = asyncio.Future()\n lookup.pending_requests[pending_uuid] = pending_future\n # Add a single contact to the contacted list.\n lookup.contacted.add(lookup.shortlist[0])\n # Sanity check.\n self.assertEqual(1, len(lookup.pending_requests))\n self.assertEqual(1, len(lookup.contacted))\n # Re-run _lookup and check state has been correctly updated.\n lookup._lookup()\n self.assertEqual(ALPHA - 1, self.node.send_find.call_count)\n self.assertEqual(ALPHA, len(lookup.pending_requests))\n self.assertEqual(ALPHA, len(lookup.contacted))", "def pending(self):\n return 0", "def resolve(self):\n raise NotImplementedError", "def resolved(self):\n\n return self.__resolved", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n next_node = min_fill_in_heuristic(graph)\n if next_node is None:\n pass\n else:\n assert False", "def test_complex_resolve_bob(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10\n s2 = s1.react(self.alice, 10, hinder=False) # --Support 10\n s2.react(self.bob, 1) # ----Attack 1\n s1.react(self.dave, 9) # --Attack 9\n s3 = s1.react(self.bob, 10,\n troop_type=\"cavalry\") # --Attack 10\n s3.react(self.alice, 1) # ----Attack 1\n\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 20)", "def ticktock(self): # controller\n for tractor in self.tractors:\n try:\n next(tractor) # state changer\n except StopIteration:\n 
pass # harmless stuck tractor signal\n\n self.framenumber += 1\n return self.framenumber", "def test_lookup_none_pending_all_contacted(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Put the lookup object in the state to test.\n lookup.pending_requests = {}\n for contact in lookup.shortlist:\n lookup.contacted.add(contact)\n self.node.send_find.call_count = 0\n # Re-run _lookup and test\n lookup._lookup()\n self.assertEqual(self.node.send_find.call_count, 0)", "def _do_resolve_weak_greedy ( self, deps, kw, greedy=True ):\n if 'greedy' in kw:\n return self.do_resolve ( deps, **kw )\n else:\n return self.do_resolve ( deps, greedy=greedy, **kw )", "def _get_last_pending_matches(self, matches):\n for match in self._tree.get_last_matches():\n matches.add_item(match)", "def test_get_recent_tour_complete(self):\n # add user to tour\n self.tour1.load_tour_class().add_user(self.test_user)\n self.tour1.load_tour_class().add_user(self.test_user2)\n self.tour2.load_tour_class().add_user(self.test_user)\n self.tour2.load_tour_class().add_user(self.test_user2)\n\n # complete tours\n self.tour2.load_tour_class().mark_complete(self.test_user)\n self.tour1.load_tour_class().mark_complete(self.test_user)\n self.tour1.load_tour_class().mark_complete(self.test_user2)\n\n # make sure complete\n self.assertEqual(3, TourStatus.objects.filter(complete=True).count())\n\n # check that correct tour is returned\n self.assertEqual(self.tour1, Tour.objects.get_recent_tour(self.test_user))", "def resolve(self): # HashMap.resolve\n prevCount = self.allFiles.count_deleted()\n\n # no need to resolve uniques, so remove them from the HashMap\n deleteList=[]\n for hashval, list in self.contentHash.iteritems():\n if len(list) == 1:\n deleteList.append(hashval)\n for e in deleteList:\n del self.contentHash[e]\n\n # delete the directories first, in order of\n # increasing depth\n if verbose:\n print '# checking candidates from depth ' + str(self.minDepth) + ' through ' + str(self.maxDepth)\n for currentDepth in xrange(self.minDepth-1,self.maxDepth+1):\n for hashval, list in self.contentHash.iteritems():\n example = list[0]\n if isinstance(example, DirObj):\n winner, losers = resolve_candidates(list, currentDepth)\n if losers != None:\n for loser in losers:\n if not loser.deleted:\n if verbose:\n print '# dir \"' + loser.pathname + '\" covered by \"' + winner.pathname + '\"'\n self.delete(loser)\n loser.winner = winner\n self.prune()\n\n for hashval, list in self.contentHash.iteritems():\n example = list[0] \n if isinstance(example, FileObj):\n winner, losers = resolve_candidates(list)\n for loser in losers:\n if not loser.deleted:\n if verbose:\n print '# file \"' + loser.pathname + '\" covered by \"' + winner.pathname + '\"'\n self.delete(loser)\n loser.winner = winner\n\n return self.allFiles.count_deleted() - prevCount", "def test_auto_assign_one_overflow(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=6,\n )\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([self.profile], unfinished)\n self.assertNotIn(self.profile, shift1.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter is None\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n 0,\n )", "def also_track(self):\n return 
self._also_track", "def test_vp_mark2_complex(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.bob, 30) # Attack with 30 -> 8vp\n s2 = s1.react(self.alice, 15,\n troop_type=\"cavalry\") # Oppose with 30 -> 7vp\n s3 = s2.react(self.bob, 14) # Oppose with 14 -> 1vp\n s3.react(self.alice, 1) # Oppose with 1\n\n s4 = s1.react(self.dave, 10, hinder=False) # Support with 10 -> 10vp\n s4.react(self.carol, 15) # Oppose with 15\n\n result = s1.resolve()\n self.assertEqual(result.victor, self.bob.team)\n\n # 10 because the 1 VP for s3 counts now, and the 1 extra lowers the\n # number of troops bob opposes with, which increases the number of\n # troops in s2, which increases the VP it's worth.\n self.assertEqual(result.vp, 10)" ]
[ "0.5656237", "0.53306234", "0.5308136", "0.5208707", "0.5158958", "0.51538366", "0.5144621", "0.5038073", "0.4976401", "0.49506652", "0.49289313", "0.4908449", "0.48834485", "0.48820204", "0.48729697", "0.48721874", "0.48557138", "0.48290616", "0.48149484", "0.48135033", "0.48071724", "0.47748622", "0.4752234", "0.46990713", "0.46895218", "0.466985", "0.46680346", "0.46486163", "0.46452707", "0.46393538" ]
0.539613
1
Use of codewords in response skirmishes
def test_response_codeword(self):
    self.bob.add_codeword('muppet', 'ranged')

    battle = self.battle
    s1 = battle.create_skirmish(self.alice, 100)
    s2 = s1.react(self.bob, 100, troop_type='muppet')
    self.assertEqual(s2.troop_type, 'ranged')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_word_info(self):\n word = \"vitality\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": \"975\",\n \"defination\": \"{'Noun': ['an energetic style', 'a healthy capacity for vigorous activity', '(biology', 'not physical or chemical', 'the property of being able to survive and grow']}\",\n \"antonyms\": \"['enervation', 'inactivity', 'lethargy', 'weakness', 'lack']\",\n \"examples\": \"{1: 'And finally, both Lord Robertson and Secretary of State Powell pointed to what they called the vitality and the relevance of NATO, and said any damage done to the reputation of NATO over the last couple weeks can quite, in their words, be easily overcome.', 2: \\\"Professor Huxley himself has told us that he lived in 'the hope and the faith that in course of time we shall see our way from the constituents of the protoplasm to its properties,' _i. e._ from carbonic acid, water, and ammonia to that mysterious thing which we call vitality or life -- from the molecular motion of the brain to Socratic wisdom,\\\", 3: 'The strongest, the most amply endowed with what we call vitality or power to live, win.', 4: 'But the thought that it is mechanics and chemistry applied by something of which they as such, form no part, some agent or principle which we call vitality, is welcome to us.', 5: '\\\"The Indian savages,\\\" said Margrave, sullenly, \\\"have not a health as perfect as mine, and in what you call vitality -- the blissful consciousness of life -- they are as sticks and stones compared to me.\\\"'}\",\n \"pronounciation\": \"V AY0 T AE1 L AH0 T IY0\",\n \"synonyms\": \"['vigor', 'continuity', 'spunk', 'strength', 'verve']\"\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def get_bot_response(response):\n\n # Response set 1 for Aang\n aang = [\n \"\\\"Harsh words won't solve problems, action will!\\\"\", # S1E11\n \"\\\"The past can be a great teacher.\\\"\", # S3E13\n \"\\\"It's easy to do nothing, but it's hard to forgive.\\\"\", # S3E16\n ]\n\n # Response set 2 for Katara\n katara = [\n \"\\\"It is the strength of your hearts that make you who you are.\\\"\", # S1E6\n \"\\\"Everybody, hold hands. We can do this. We have to.\\\"\", # S2E11\n \"\\\"I will never, ever turn my back on people who need me!\\\"\", # S3E3\n ]\n\n # Response set 3 for Sokka\n sokka = [\n \"\\\"I'm just a guy with a boomerang. I didn't ask for all this flying and magic!\\\"\", # S1E2\n \"\\\"It's a giant mushroom! 
Maybe it's friendly!\\\"\", # S2E11\n \"\\\"I'm just a guy who likes comedy.\\\"\", # S3E17\n ]\n\n # Response set 4 for Uncle Iroh\n iroh = [\n \"\\\"Pride is not the opposite of shame, but its source.\\n\"\n \"True humility is the only antidote to shame.\\\"\", # S2E9\n\n \"\\\"It is usually best to admit mistakes when they occur,\\n\"\n \"and seek to restore honor.\\\"\", # S2E15\n\n \"\\\"You can't always see the light at the end of the tunnel,\\n\"\n \"but if you just keep moving, you will come to a better place.\\\"\" # S2E20\n ]\n\n # Response set 5 for Cabbage Merchant\n cabbage = [\n \"\\\"No! My cabbages!\\\"\", # S1E5\n \"\\\"My cabbages! This place is worse than Omashu!\\\"\", # S1E9\n \"\\\"My cabb--!... Oh, forget it!\\\"\" # S2E15\n ]\n\n # Pseudorandom response chosen from a response set selected based on keyword from user\n # \"in\" checks if keyword is present in user's inputted phrase\n if \"Aang\" in response or \"aang\" in response:\n return f\"Aang says:\\n{choice(aang)}\"\n elif \"Katara\" in response or \"katara\" in response:\n return f\"Katara says:\\n{choice(katara)}\"\n elif \"Sokka\" in response or \"sokka\" in response:\n return f\"Sokka says:\\n{choice(sokka)}\"\n elif \"Iroh\" in response or \"iroh\" in response:\n return f\"Uncle Iroh says:\\n{choice(iroh)}\"\n elif \"Cabbage\" in response or \"cabbage\" in response:\n return f\"The Cabbage Merchant says:\\n{choice(cabbage)}\"\n else: # Return default statement when user input is unrecognized\n return f\"Hmm... I don't have any quotes for {response}\"", "def test_rhyme_words(self):\n input_word_list = \"climbing\"\n expected_output_list = {\"rhyme\": [\"diming\", \"liming\", \"priming\", \"rhyming\", \"timing\"]}\n rv = self.rhymeWords(input_word=input_word_list)\n response_data = json.loads(rv.get_data(as_text=True))\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(set(ast.literal_eval(response_data[\"rhyme\"])), set(expected_output_list[\"rhyme\"]))", "def extractReservedWords(code):\n reserved_words=[] #https://realpython.com/lessons/reserved-keywords/5646\n code = str(code).replace(\"\\n\", \"\")\n for c in code.split(\" \"): \n if keyword.iskeyword(c): \n reserved_words.append(c) \n str1= \" \"\n return (str1.join(reserved_words))", "def code():", "def get_stopwords(choice = 0):\n low_acc_words = [u'orange', u'game', u'wafe', u'gold', u'gas pump', u'dock', u'magnetic disk', u'beard', u'splash', u'stethoscope', u'clock', u'modem', u'spring', u'dribble', u'scale', u'thing', u'parachute', u'screw', u'haired', u'hair spray', u'stick', u'projectile', u'surface', u'scarf', u'boat', u'lantern', u'weapon', u'fire screen', u'maypole', u'Old World buffalo', u'backpack', u'velvet', u'pistol', u'duplicator', u'tissue', u'holding', u'eel', u'iron', u'zoo', u'toilet seat', u'eye', u'telephone', u'drum', u'pepper', u'church', u'pillow', u'body', u'mink', u'prison', u'color', u'jewelry', u'elephant', u'mug', u'cargo ship', u'football', u'llama', u'wombat', u'ax', u'giant panda', u'bison', u'climber', u'tractor', u'hamster', u'beetle', u'sidewalk', u'oilseed', u'shore', u'feet', u'vending machine', u'nail', u'lock', u'licking', u'crowded', u'pudding', u'library', u'sliding', u'steel drum', u'cutter', u'trench coat', u'plate rack', u'fancy', u'barbershop', u'switch', u'hip', u'petting', u'keyboard', u'drilling platform', u'denim', u'old', u'sewing machine', u'dancing', u'lawn mower', u'jaguar', u'cauliflower', u'bubble', u'tray', u'printer', u'hillside', u'heater', u'store', u'stove', u'hook', u'bed', u'book 
jacket', u'rain barrel', u'dinosaur', u'rowing', u'surf', u'worm', u'garbage truck', u'laptop', u'mouth', u'flute', u'tape player', u'gym', u'large', u'birdhouse', u'covered', u'groom', u'swan', u'lampshade', u'snowplow', u'ramp', u'bathing cap', u'strainer', u'hard', u'mortarboard', u'penguin', u'wooden spoon', u'loaf of bread', u'window', u\"potter's wheel\", u'branch', u'fly', u'greyhound', u'walk', u'starfish', u'kitchen', u'parking meter', u'cassette', u'work', u'cash machine', u'custard apple', u'play', u'ice cream', u'mosque', u'market', u'swing', u'hay', u'fan', u'surfer', u'number', u'climb', u'golfcart', u'burrito', u'feather boa', u'resting', u'neck brace', u'glove', u'remote control', u'lotion', u'lamp', u'perched', u'jeep', u'necklace', u'shopping basket', u'sea urchin', u'pajama', u'pinwheel', u'foot', u'maze', u'squash', u'dishrag', u'bib', u'ant', u'dumbbell', u'dragonfly', u'bakery', u'lighter', u'salamander', u'sandglass', u'apron', u'cannon', u'palm', u'tent', u'spacecraft', u'oil filter', u'beer bottle', u'throne', u'stretcher', u'bedroom', u'pan', u'camera', u'kiddie', u'mashed potato', u'railing', u'tongue', u'sky', u'event', u'bright', u'curb', u'sundial', u'screwdriver', u'hand blower', u'joystick', u'flower', u'tv', u'back', u'smile', u'mortar', u'bee', u'bath', u'spatula', u'lawn', u'object', u'barrier', u'mailbox', u'fallen', u'crayfish', u'kid', u'metal', u'shot', u'quill', u'snowboarding', u'mud', u'vacuum', u'water tower', u'sleeping bag', u'altar', u'bassoon', u'family', u'shovel', u'leather', u'maillot', u'soap dispenser', u'blurry', u'racetrack', u'dish', u'gondola', u'chewing', u'badger', u'spindle', u'door', u'shaker', u'purse', u'apiary', u'bus', u'wreck', u'cell', u'balance beam', u'lip', u'animal', u'baby', u'toilet', u'armor plate', u'jigsaw puzzle', u'piggy bank', u'leafhopper', u'torch', u'ashcan', u'talking', u'traveling', u'handrail', u'area', u'raft', u'can opener', u'missile', u'syringe', u'pen', u'beacon', u'croquet ball', u'trail', u'snowboard', u'light', u'owl', u'lift', u'acorn', u'pencil box', u'hermit crab', u'binder', u'ladle', u'fire engine', u'tan', u'volcano', u'chocolate sauce', u'crossword puzzle', u'whistle', u'floating', u'forklift', u'hotdog', u'monotreme', u'eggnog', u'traffic', u'envelope', u'surfboard', u'face', u'polecat', u'tiled', u'camel', u'refrigerator', u'carousel', u'parking', u'spider web', u'stream', u'train', u'square', u'candle', u'thimble', u'jellyfish', u'teddy', u'leash', u'wild', u'shopping cart', u'jackfruit', u'office', u'alligator', u'ready', u'end', u'power drill', u'lens cap', u'looking', u'hand', u'fountain', u'radiator', u'French horn', u'graze', u'female', u'koala', u'paper towel', u'artichoke', u'passenger', u'airship', u'cow', u'slug', u'home', u'tug', u'weasel', u'including', u'crutch', u'submarine', u'chime', u'pretty', u'phone', u'barrow', u'purple', u'pulling', u'wing', u'mongoose', u'washer', u'slide', u'Band Aid', u'splashing', u'obstacle', u'flying', u'restaurant', u'pencil sharpener', u'control', u'something', u'tricycle', u'motor', u'watching', u'grey', u'balcony', u'surrounded', u'statue', u'rotisserie', u'puck', u'assorted', u'umbrella', u'measuring cup', u'hanging', u'ride', u'scuba', u'perform', u'tusker', u'desk', u'puddle', u'sea slug', u'team', u'beaker', u'held', u'safe', u'shower curtain', u'isopod', u'tire', u'beaver', u'tower', u'stump', u'dinner', u'conch', u'playground', u'marmot', u'fruit', u'golf ball', u'read', u'tile', u'watch', u'mosquito net', u'goggle', u'swab', u'cricket', 
u'wheelie', u'guacamole', u'bush', u'cockroach', u'intersection', u'letter opener', u'station', u'plow', u'course', u'aeroplane', u'view', u'racing', u'broom', u'sunny', u'corn', u'matchstick', u'variety', u'messy', u'playpen', u'ambulance', u'perfume', u'brush', u'go', u'shelf', u'look', u'blowing', u'lobster', u'lettuce', u'busy', u'digging', u'trampoline', u'track', u'glass', u'ox', u'handstand', u'assortment', u'vase', u'aircraft carrier', u'microwave', u'high', u'mousetrap', u'bathroom', u'shower cap', u'counter', u'Christmas stocking', u'safety pin', u'plastic', u'garden', u'transit', u'knife', u'docked', u'cluttered', u'serving', u'toddler', u'ledge', u'formation', u'snorkel', u'lying', u'lemon', u'ladybug', u'carry', u'solar dish', u'hammer', u'sleeping', u'saltshaker', u'cowboy', u'unicycle', u'single', u'rule', u'shoji', u'business', u'cup', u'antique', u'catch', u'open', u'carnival', u'cooking', u'rural', u'small', u'wine', u'top', u'flat', u'yurt', u'grasshopper', u'hoop', u'wallet', u'hold', u'someone', u'necked', u'salad', u'leafe', u'paddlewheel', u'porcupine', u'radio telescope', u'preparing', u'canopy', u'pointing', u'honeycomb', u'older', u'hair slide', u'plunger', u'mirror', u'landscape', u'bow', u'cart', u'skateboard', u'device', u'urban', u'sunset', u'attached', u'toward', u'right', u'town', u'four', u'beach wagon', u'close', u'lone', u'chew', u'pile', u'working', u'bottlecap', u'corner', u'swinging', u'behind', u'slot machine', u'food', u'mushroom', u'around', u'tall', u'oxygen mask', u'together', u'veggy', u'skating', u'concrete', u'subway', u'seen', u'head', u'armadillo', u'ly', u'kitten', u'cap', u'painted', u'mustache', u'moving', u'lit', u'sliced', u'sticking', u'milk can', u'roller', u'stainless', u'teeth', u'seated', u'serve', u'lady', u'carriage', u'stand', u'apple', u'paper', u'apartment', u'video', u'eating', u'stadium', u'turn', u'racket', u'stunt', u'plate', u'drinking', u'slice', u'warplane', u'cheese', u'onion', u'backyard', u'coffee', u'peach', u'staring', u'outfit', u'engine', u'coaster', u'striped', u'stacked', u'decorated', u'throwing', u'dirty', u'hula', u'mid', u'catching', u'closed', u'item', u'otter', u'rail', u'tenni', u'sink', u'toaster', u'meal', u'skate', u'fridge', u'pitch', u'kite', u'desktop', u'meat', u'military', u'fireplace', u'show', u'rider', u'rodeo', u'graffiti', u'bunch', u'coming', u'reading', u'walkway', u'another', u'mouse', u'soup', u'hole', u'steel', u'container', u'past', u'carrying', u'equipment', u'farm', u'dressed', u'scooter', u'cellphone', u'stuffed', u'commercial', u'platform', u'full', u'one', u'electronic', u'sprinkler', u'stop', u'along', u'blanket', u'residential', u'kneeling', u'blender', u'oven', u'cattle', u'skateboarder', u'produce', u'book', u'cement', u'bag', u'carrot', u'board', u'round', u'many', u'giant', u'shower', u'asian', u'picnic', u'dining', u'wedding', u'desert', u'huge', u'narrow', u'outside', u'deck', u'three', u'display', u'filled', u'cutting', u'colored', u'ear', u'feeding', u'across', u'eat', u'skateboarding', u'fighter', u'sun', u'darkened', u'brushing', u'ty', u'party', u'pedestrian', u'wet', u'structure', u'different', u'crossbone', u'jet', u'public', u'cooked', u'airplane', u'bread', u'clothe', u'tunnel', u'fishing', u'drife', u'gear', u'birthday', u'frisbee', u'piece', u'row', u'hydrant', u'drawn', u'meter', u'vegetable', u'broccoli', u'country', u'half', u'sandwich', u'doorway', u'lot', u'pair', u'luggage', u'long', u'christma', u'wii', u'guy', u'side', u'leap', u'plane', u'silver', 
u'post', u'bar', u'reaching', u'drink', u'reflection', u'wand', u'airport', u'photograph', u'type', u'lay', u'lap', u'waterfall', u'banana', u'next', u'baseball', u'hot', u'making', u'gray', u'using', u'batter', u'empty', u'bat', u'clear', u'hospital', u'scissor', u'neck', u'cake', u'alone', u'rope', u'winter', u'runway', u'broken', u'fire', u'getting', u'variou', u'distance', u'beer', u'outstretched', u'chocolate', u'match', u'stopped', u'vintage', u'clean', u'fork', u'cut', u'eaten', u'waiting', u'going', u'onto', u'nintendo', u'time', u'several', u'lined', u'railroad', u'case', u'mother', u'suitcase', u'taking', u'doughnut', u'smoke', u'controller', u'crossing', u'friend', u'closeup', u'couple', u'showing', u'made', u'big', u'trying', u'putting', u'hit', u'male', u'', u'pickelhaube', u'suburban', u'costume', u'enjoy', u'new', u'studio', u'mantis', u'pastum', u'gymnast', u'rafting', u'golden', u'waffle iron', u'watering', u'overhead', u'shoot', u'feature', u'machine', u'attempt', u'third', u'tulip', u'jungle', u'wind', u'fig', u'band', u'bone', u'free', u'cucumber', u'bouncing', u'boarding', u'tackled', u'__background__', u'gymnastic apparatus', u'pineapple', u'folded', u'rice', u'sunglasses', u'cushion', u'net', u'covering', u'pretzel', u'steam', u'santum', u'fair', u'sail', u'score', u'toothbrush', u'loaded', u'fry', u'life', u'glider', u'bounce', u'balance', u'cone', u'containing', u'beside', u'wheel', u'rain', u'spaghetti squash', u'thi', u'left', u'photographer', u'forested', u'vanity', u'shoulder', u'pavement', u'officer', u'creek', u'dead', u'ice', u'slide rule', u'dunking', u'horizon', u'raised', u'fabric', u'fight', u'way', u'war', u'landing', u'umpire', u'fashioned', u'dimly', u'topped', u'setting', u'sling', u'potato', u'painting', u'bottom', u'dance', u'crocodile', u'string', u'dig', u'gun', u'chicken', u'tarmac', u'falling', u'french', u'wait', u'pony', u'decker', u'plaza', u'earphone', u'chip', u'get', u'staircase', u'wakeboarder', u'wheelchair', u'pulled', u'polouse', u'still', u'curly', u'scaling', u'lunch', u'base', u'pizza', u'meat loaf', u'shown', u'opened', u'space', u'mess', u'headband', u'place', u'pelican', u'ring', u'sheet', u'bite', u'frame', u'hug', u'wide', u'lick', u'pastry', u'breakfast', u'take', u'topping', u'multiple', u'knee', u'tackling', u'sale', u'professional', u'german', u'crane', u'snack', u'stair', u'ping-pong ball', u'snowsuit', u'sport', u'bicyclist', u'skyscraper', u'checkered', u'restroom', u'tour', u'nearby', u'foggy', u'bmx', u'newspaper', u'mound', u'foam', u'driven', u'mohawk', u'rest', u'instrument', u'chainsaw', u'towel', u'facing', u'audience', u'served', u'clau', u'go-kart', u'tube', u'throw', u'muddy', u'harness', u'strip', u'racquet', u'prepare', u'low', u'pitcher', u'cardoon', u'gymnasium', u'pull', u'arranged', u'strawberry', u'deep', u'cream', u'rubber', u'trash', u'midair', u'peak', u'remote', u'disc', u'follow', u'potpie', u'enjoying', u'stool', u'leaping', u'action', u'taken', u'chopstick', u'flag', u'mounted', u'grill', u'wrestler', u'marble', u'backpacking', u'breaking', u'fungus', u'shade', u'egg', u'muzzled', u'style', u'carpeted', u'sauce', u'snowball', u'abacus', u'foreground', u'circuit', u'leading', u'airborne', u'hotel', u'leotard', u'kind', u'double', u'scabbard', u'bride', u'stall', u'blond', u'cave', u'electric', u'cigarette', u'sponsored', u'shepherd', u'dandelion', u'catcher', u'movie', u'recently', u'floaty', u'chambered nautilus', u'hitting', u'racer', u'passing', u'leaning', u'kissing', u'chase', u'funny', 
u'used', u'snail', u'pomegranate', u'stack', u'center', u'grind', u'bin', u'formal', u'shaped', u'signal', u'zucchini', u'parade', u'limb', u'laughing', u'step', u'range', u'slouse', u'block', u'downhill', u'jockey', u'retrieving', u'atop', u'cloth', u'skull', u'diving', u'rainy', u'tarp', u'black-footed ferret', u'nice', u'prepared', u'hot pot', u'land', u'fresh', u'hello', u'wrestle', u'kitty', u'spoon', u'rack', u'smaller', u'hose', u'giving', u'attire', u'leaving', u'chiton', u'singing', u'frog', u'crab', u'porch', u'saddle', u'donut', u'crossed', u'tied', u'tomato', u'chasing', u'scenic', u'beneath', u'boarder', u'hippopotamus', u'wading', u'sea_anemone', u'wrapped', u'shallow', u'steep', u'bagel', u'gather', u'pipe', u'hi', u'ha', u'jar', u'bug', u'finger', u'handle', u'beam', u'bean', u'whilst', u'contain', u'shake', u'attempting', u'merry', u'yawning', u'sniff', u'swimmer', u'commuter', u'bull', u'smoking', u'plain', u'cross', u'member', u'binoculars', u'underneath', u'well', u'fighting', u'bandanna', u'rocket', u'pay-phone', u'five', u'puppy', u'like', u'campfire', u'shaking', u'construction', u'bun', u'partially', u'flip', u'placed', u'bearing', u'pinatum', u'pie', u'boardwalk', u'pit', u'star', u'baked']\n\n STOPWORDS = ['none','inside', 'near', 'one', 'two', 'three', 'day', 'front', u'i', u'me', u'my', u'myself', u'we', u'our', u'ours', u'ourselves', u'you', u'your', u'yours', u'yourself', u'yourselves', u'he', u'him', u'his', u'himself', u'she', u'her', u'hers', u'herself', u'it', u'its', u'itself', u'they', u'them', u'their', u'theirs', u'themselves', u'what', u'which', u'who', u'whom', u'this', u'that', u'these', u'those', u'am', u'is', u'are', u'was', u'were', u'be', u'been', u'being', u'have', u'has', u'had', u'having', u'do', u'does', u'did', u'doing', u'a', u'an', u'the', u'and', u'but', u'if', u'or', u'because', u'as', u'until', u'while', u'of', u'at', u'by', u'for', u'with', u'about', u'against', u'between', u'into', u'through', u'during', u'before', u'after', u'above', u'below', u'to', u'from', u'up', u'down', u'in', u'out', u'on', u'off', u'over', u'under', u'again', u'further', u'then', u'once', u'here', u'there', u'when', u'where', u'why', u'how', u'all', u'any', u'both', u'each', u'few', u'more', u'most', u'other', u'some', u'such', u'no', u'nor', u'not', u'only', u'own', u'same', u'so', u'than', u'too', u'very', u's', u't', u'can', u'will', u'just', u'don', u'should', u'now', 'background', '__background__', '']\n \n\n unselected_words = [u'', u'pickelhaube', u'enjoy', u'new', u'studio', u'kissing', u'mantis', u'pastum', u'rafting', u'golden', u'waffle iron', u'watering', u'overhead', u'shoot', u'feature', u'machine', u'pizza', u'attempt', u'third', u'tulip', u'jungle', u'wind', u'fig', u'band', u'bone', u'free', u'bouncing', u'boarding', u'tackled', u'__background__', u'gymnasium', u'gymnastic apparatus', u'pineapple', u'folded', u'rice', u'sunglasses', u'cushion', u'net', u'covering', u'pretzel', u'steam', u'santum', u'fair', u'sail', u'score', u'toothbrush', u'loaded', u'fry', u'life', u'glider', u'balance', u'cone', u'containing', u'beside', u'wheel', u'rain', u'spaghetti squash', u'thi', u'left', u'photographer', u'forested', u'vanity', u'shoulder', u'pavement', u'officer', u'creek', u'dead', u'slide rule', u'dunking', u'horizon', u'raised', u'fabric', u'fight', u'way', u'war', u'landing', u'umpire', u'fashioned', u'dimly', u'topped', u'setting', u'sling', u'potato', u'bottom', u'dance', u'crocodile', u'ice', u'string', u'dig', u'gun', u'tarmac', u'falling', 
u'french', u'wait', u'decker', u'earphone', u'chip', u'get', u'staircase', u'wakeboarder', u'wheelchair', u'pulled', u'polouse', u'still', u'curly', u'scaling', u'lunch', u'meat loaf', u'shown', u'opened', u'space', u'mess', u'headband', u'place', u'pelican', u'ring', u'sheet', u'bite', u'hug', u'wide', u'lick', u'pastry', u'breakfast', u'take', u'topping', u'multiple', u'knee', u'bicyclist', u'sale', u'professional', u'german', u'snack', u'stair', u'ping-pong ball', u'snowsuit', u'sport', u'tackling', u'skyscraper', u'checkered', u'restroom', u'tour', u'nearby', u'foggy', u'bmx', u'newspaper', u'mound', u'chopstick', u'foam', u'driven', u'passing', u'mohawk', u'rest', u'instrument', u'chainsaw', u'towel', u'facing', u'audience', u'laughing', u'served', u'clau', u'diving', u'go-kart', u'tube', u'throw', u'harness', u'strip', u'racquet', u'prepare', u'low', u'pitcher', u'cardoon', u'pull', u'arranged', u'strawberry', u'deep', u'cream', u'rubber', u'trash', u'midair', u'peak', u'remote', u'suburban', u'disc', u'follow', u'potpie', u'gymnast', u'enjoying', u'stool', u'leaping', u'action', u'taken', u'flag', u'mounted', u'grill', u'wrestler', u'marble', u'pony', u'backpacking', u'breaking', u'fungus', u'shade', u'egg', u'style', u'carpeted', u'sauce', u'snowball', u'abacus', u'foreground', u'base', u'circuit', u'leading', u'airborne', u'hotel', u'leotard', u'kind', u'double', u'scabbard', u'bride', u'stall', u'blond', u'cave', u'zucchini', u'electric', u'cigarette', u'sponsored', u'shepherd', u'dandelion', u'catcher', u'movie', u'recently', u'floaty', u'chambered nautilus', u'hitting', u'racer', u'leaning', u'chase', u'funny', u'used', u'snail', u'pomegranate', u'cucumber', u'stack', u'center', u'grind', u'bin', u'formal', u'shaped', u'signal', u'parade', u'bounce', u'step', u'plaza', u'range', u'slouse', u'block', u'downhill', u'jockey', u'retrieving', u'atop', u'cloth', u'crane', u'skull', u'rainy', u'tarp', u'black-footed ferret', u'nice', u'prepared', u'hot pot', u'land', u'fresh', u'hello', u'wrestle', u'kitty', u'spoon', u'muzzled', u'rack', u'smaller', u'hose', u'giving', u'attire', u'leaving', u'chiton', u'limb', u'singing', u'frog', u'crab', u'porch', u'donut', u'crossed', u'tied', u'tomato', u'chasing', u'scenic', u'beneath', u'shaking', u'boarder', u'hippopotamus', u'wading', u'sea_anemone', u'wrapped', u'shallow', u'steep', u'bagel', u'gather', u'pipe', u'construction', u'painting', u'chicken', u'jar', u'bug', u'finger', u'handle', u'beam', u'bean', u'whilst', u'contain', u'costume', u'frame', u'shake', u'attempting', u'merry', u'yawning', u'sniff', u'swimmer', u'muddy', u'commuter', u'bull', u'smoking', u'plain', u'cross', u'member', u'binoculars', u'underneath', u'well', u'fighting', u'bandanna', u'rocket', u'pay-phone', u'five', u'puppy', u'like', u'campfire', u'saddle', u'hi', u'bun', u'ha', u'partially', u'flip', u'placed', u'bearing', u'pinatum', u'pie', u'boardwalk', u'pit', u'star', u'baked', u'smoke', u'hospital', u'type', u'hole', u'wand', u'chocolate sauce', u'haired', u'onto', u'drawn', u'wear', u'loaf of bread', u'beer', u'mushroom', u'lift', u'make', u'mother', u'cowboy', u'fork', u'otter', u'playpen', u'alone', u'hamburger', u'bottlecap', u'soup', u'cutter', u'square', u'friend', u'scuba', u'hockey', u'wheelie', u'picnic', u'tug', u'squash', u'case', u'inflatable', u'railroad', u'competition', u'slice', u'broken', u'jeep', u'trying', u'apartment', u'chewing', u'grasshopper', u'guacamole', u'splash', u'male', u'dishrag', u'kayaking', u'acorn', u'snowbank', u'clean', 
u'hit', u'batter', u'kick', u'jewelry', u'fighter', u'cooked', u'putting', u'try', u'wallet', u'mustache', u'artichoke', u'spaghetti sauce', u'crossing', u'retriever', u'veggy', u'produce', u'darkened', u'kiddie', u'mashed potato', u'closed', u'canopy', u'runway', u'vintage', u'fishing', u'doughnut', u'onion', u'leap', u'rodeo', u'cricket', u'made', u'closeup', u'chew', u'sliced', u'hot', u'deck', u'French horn', u'clothe', u'goggle', u'rowing', u'milk can', u'post', u'outstretched', u'chocolate', u'making', u'course', u'hula', u'carry', u'upside', u'desktop', u'lobster', u'suitcase', u'crossbone', u'ty', u'sea slug', u'polecat', u'sandwich', u'racetrack', u'lettuce', u'cockroach', u'toward', u'eaten', u'blender', u'giant', u'atv', u'big', u'holster', u'splashing', u'commercial', u'tunnel', u'bend', u'meter', u'including', u'badger', u'beach wagon', u'beard', u'beak', u'controller', u'match', u'buckle', u'hiker', u'barometer', u'bread', u'serve', u'object', u'stadium', u'tank', u'waterfall', u'stream', u'neck', u'serving', u'manhole cover', u'pitch', u'pistol', u'dribble', u'isopod', u'transit', u'dragonfly', u'huge', u'backyard', u'foot', u'jet', u'dancing', u'custard apple', u'porcupine', u'assorted', u'rope', u'cut', u'showing', u'lemon', u'armadillo', u'salad', u'carrot', u'biting', u'bee', u'hammer', u'lens cap', u'cauliflower', u'kicking', u'denim', u'marmot', u'nintendo', u'fireplace', u'landscape', u'turn', u'hoop', u'wedding', u'eggnog', u'antique', u'bow', u'winter', u'stacked', u'purse', u'beaver', u'kneeling', u'island', u'slot machine', u'Christmas stocking', u'public', u'narrow', u'ladybug', u'stopped', u'burrito', u'necked', u'cheese', u'crayfish', u'single', u'getting', u'tan', u'lined', u'handstand', u'letter opener', u'pencil box', u'doorway', u'leafhopper', u'residential', u'slug', u'eat', u'carriage', u'end', u'lap', u'distance', u'mink', u'sleeping bag', u'time', u'container', u'stunt', u'drife', u'broccoli', u'docked', u'structure', u'cooker', u'go', u'aircraft carrier', u'pudding', u'tape player', u'outfit', u'coaster', u'reaching', u'meat', u'splashed', u'hair slide', u'roller', u'submarine', u'toaster', u'dining', u'rotisserie', u'football', u'spindle', u'christma', u'thimble', u'giant panda', u'pedestrian', u'compass', u'squirrel', u'sea urchin', u'hotdog', u'peach', u'warplane', u'oil filter', u'waiting', u'hip', u'jaguar', u'mortar', u'gear', u'sprinkler', u'beer bottle', u'gondola', u'half', u'stainless', u'military', u'electronic', u'bat', u'handrail', u'perform', u'coffee maker', u'flat', u'round', u'meal', u'telephone', u'pool table', u'seagull', u'hermit crab', u'fancy', u'obstacle', u'honeycomb', u'gravel', u'ladle', u'farm', u'crossword puzzle', u'steel', u'drink', u'pepper', u'tongue', u'owl', u'rule', u'gym', u'seated', u'monotreme', u'cattle', u'water tower', u'vegetable', u'eel', u'variou', u'messy', u'raft', u'castle', u'fire', u'bib', u'skunk', u'gray', u\"carpenter's kit\", u'wombat', u'carnival', u'equipment', u'mousetrap', u'joystick', u'golf ball', u'shoji', u'banana', u'clear', u'sloth', u'glove', u'reel', u'desert', u'necklace', u'ear', u'digging', u'rural', u'asian', u'school', u'wreck', u'coffee', u'hydrant', u'mouse', u'mid', u'row', u'puddle', u'engine', u'mongoose', u'stopwatch', u'walkway', u'past', u'beacon', u'koala', u'lip', u'gold', u'scooter', u'puck', u\"potter's wheel\", u'ly', u'oilseed', u'tire', u'drum', u'party', u'radio telescope', u'worm', u'lay', u'magnetic disk', u'bar', u'butterfly', u'dinner', u'birthday', u'power 
drill', u'saltshaker', u'thing', u'ant', u'lantern', u'hard', u'weasel', u'ridden', u'paddlewheel', u'drilling platform', u'climber', u'safe', u'shower', u'airship', u'cassette player', u'printer', u'wooden spoon', u'bassoon', u'reflection', u'scissor', u'apiary', u'ice cream', u'rider', u'boathouse', u'mud', u'corn', u'guinea pig', u'snow leopard', u'mailbox', u'cement', u'bakery', u'taking', u'variety', u'swan', u'velvet', u'couple', u'fridge', u'strainer', u'dirty', u'screwdriver', u'jigsaw puzzle', u'device', u'alligator', u'oven', u'silver', u'urban', u'country', u'opener', u'leather', u'barrel', u'duck', u'drumstick', u'cake', u'ambulance', u'pencil sharpener', u'barrier', u'safety pin', u'right', u'baseball', u'beetle', u'ax', u'cassette', u'assortment', u'entree', u'armor plate', u'going', u'cart', u'can opener', u'curve', u'pointing', u'dribbling', u'sock', u'home', u'catching', u'church', u'mosque', u'measuring cup', u'striped', u'throne', u'skating', u'sundial', u'CD player', u'grille', u'brushing', u'jersey', u'plunger', u'conch', u'several', u'shaker', u'tile', u'stretcher', u'tower', u'plane', u'salamander', u'lock', u'platform', u'airport', u'hamster', u'graffiti', u'jackfruit', u'cabbage', u'blowing', u'kitten', u'yurt', u'cannon', u'powder', u'sea cucumber', u'sea cow', u'dinosaur', u'racing', u'primate', u'wii', u'skateboarding', u'blanket', u'mug', u'cap', u'challenging', u'throwing', u'library', u'quill', u'trench coat', u'microwave', u'tusker', u'cluttered', u'apple', u'duplicator', u'broom', u'wet', u'altar', u'show', u'heater', u'radiator', u'cargo ship', u'spatula', u'screw', u'neck brace', u'flute', u'peacock', u'sewing machine', u'reading', u'dough', u'rifle', u'long', u'penguin', u'playground', u'photograph', u'luggage', u'plow', u'item', u'factory', u'starfish', u'fire engine', u'locomotive', u'piggy bank', u'empty', u'scale', u'plate rack', u'graze', u'cutting', u'feeding', u'cooking', u'rapid', u'ledge', u'business', u'colored', u'forklift', u'boot', u'wing', u'remote control', u'trampoline', u'gas pump', u'space bar', u'snorkel', u'book', u'microscope', u'rain barrel', u'pair', u'Old World buffalo', u'airplane', u'creature', u'knee pad', u'whale', u'birdhouse', u'oxygen mask', u'bag', u'sailboat', u'mat', u'town', u'using', u'rugby ball', u'staring', u'shopping basket', u'binder', u'team', u'sailing vessel', u'ox', u'leopard', u'shield', u'full', u'Band Aid', u'mountaintop', u'crate', u'modem', u'family', u'tennis ball', u'barn', u'work', u'formation', u'barrow', u'goose', u'syringe', u'soap dispenser', u'kite', u'appliance', u'solar dish', u'lizard', u'paddling', u'cardigan', u'sink', u'control', u'toddler', u'mortarboard']\n\n useless_words = ['holding','hold' ,'wearing', 'wear' , 'standing','sitting', 'stand', 'sit' , 'smiling', 'smile', 'clothing', 'shirt', \"next\", 'posing', 'playing']\n abstract_words = ['beautiful', 'young']\n color_words = ['black', 'white', 'red', 'blue', 'brown']\n\n if choice == 1:\n return STOPWORDS\n\n STOPWORDS += unselected_words\n STOPWORDS += useless_words\n STOPWORDS += low_acc_words\n #STOPWORDS += color_words\n #STOPWORDS += abstract_words\n return STOPWORDS", "def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_handling(self) -> 
global___Snippet.SimpleResponseHandling:", "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if example_slot in slots:\n curr_word = slots[example_slot].value\n handler_input.attributes_manager.session_attributes[\n example_slot_key] = curr_word\n\n try:\n response = http_get(curr_word, False)\n\n if response:\n example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]\n if example == \"vis\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n elif example == \"wsgram\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n except Exception as e:\n speech = (\"No example is available for {}. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = \"I'm not sure what word to look up, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if synonym_slot in slots:\n curr_word = slots[synonym_slot].value\n handler_input.attributes_manager.session_attributes[\n synonym_slot_key] = curr_word\n\n try:\n synonyms = http_get(curr_word, True)\n\n if type(synonyms[0]) == dict:\n speech = (\"A synonym for {} is {}\".format(curr_word,\n synonyms[0]['meta']['syns'][0][0]))\n synonym_list = synonyms[0]['meta']['syns'][0]\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n except:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = \"I'm not sure what word to find a synonym for, please try again\"\n reprompt = (\"I didn't catch that. 
What word would you like me \"\n \"me to look up a synonym for?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def test_codeword(self):\n self.assertEqual(self.sess.query(db.CodeWord).count(), 0)\n self.alice.add_codeword('muppet', 'ranged')\n self.assertEqual(self.sess.query(db.CodeWord).count(), 1)\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"ranged\")\n\n self.alice.remove_codeword('muppet')\n self.assertEqual(self.sess.query(db.CodeWord).count(), 0)\n s2 = s1.react(self.alice, 1, hinder=False, troop_type='muppet')\n self.assertEqual(s2.troop_type, 'infantry')", "def sentiment():\n\n request_json = request.json\n power = request_json['power']\n angle = request_json['angle']\n\n print(power, angle)\n\n resp_dict = dict()\n resp_dict['kick'] = 'ok'\n\n resp = Response(json.dumps(resp_dict), status=200)\n\n return resp", "def get_response(text: str):\n # Step 01: Initialize the response.\n response = dict()\n results = dict()\n\n vectorized_text = dict()\n vectorized_text['test'] = (PredictionService.__vc.transform([text])) # see options in the above cell\n\n print ('DONE - [EMBEDDING] Apply Chosen Embeddings to the Tweets')\n # Step 02: Predict the label/class of the received text.\n predicted_sentiment = PredictionService.__model.predict(vectorized_text['test']).tolist()\n\n # Step 03: Parse the prediction result.\n if (predicted_sentiment[0] == 1):\n results[\"label\"] = \"Relevant\"\n else:\n results[\"label\"] = \"Not Relevant\"\n\n # Step 04: Prepare the response.\n response[\"status\"] = 200\n response[\"results\"] = results\n\n # Step 05: Return the response.\n return response", "def meaning_of(word, app_id, app_key):\n \n url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word.lower()\n r = requests.get(url, headers={\"app_id\":app_id, \"app_key\":app_key})\n\n data = r.json()\n useful_data = {}\n\n for i in data['results'][0]['lexicalEntries'][0]['entries']:\n for j in i:\n for k in i[j][0]:\n try:\n subdata = i[j][0][k]\n if k == 'subsenses':\n useful_data.update({\"meanings\":subdata[0]['definitions']})\n elif k == 'examples':\n useful_data.update({\"examples\":subdata[0]['text']})\n else:\n pass\n except:\n pass\n return useful_data", "def sentence():\n res = random.choice(no_result)\n adr = random.choice(address)\n wik = random.choice(wiki)\n return {'res': res, 'adr': adr, 'wik': wik}", "def why_handler(self, data, suffix=''):\n # Just to show data coming in...\n assert data['requested'] == 'inspiration'\n\n return {'quote': self.get_random_inspirational_quote()}", "def test_word_rhyme_bad_request(self):\n word = \"Not a single Term \"\n rv = self.rhymeWords(input_word=word)\n expected_output = {\n \"code\": 400,\n \"message\": \"A Term must be only a single word\"\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 400)\n self.assertEquals(response_data[\"code\"], expected_output[\"code\"])\n self.assertEquals(response_data[\"message\"], expected_output[\"message\"])", "def test_short_words_are_removed(self):\n trait_1 = factories.HarmonizedTraitFactory.create(i_description='lorem ipsum')\n trait_2 = factories.HarmonizedTraitFactory.create(i_description='lorem')\n response = self.client.get(self.get_url(), {'description': 'lorem ip'})\n context = response.context\n 
self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.HarmonizedTraitTable)\n self.assertEqual(len(context['results_table'].rows), 2)\n self.assertIn(trait_1, context['results_table'].data)\n self.assertIn(trait_2, context['results_table'].data)", "async def dict(self, ctx, *keywords):\n\n if not keywords:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Did you tried `{}help dict` yet?'.format(self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['error'])\n return\n if keywords:\n old_keyword = \" \".join(keywords)\n try:\n keywords = \"%20\".join(keywords)\n url = 'http://api.urbandictionary.com/v0/define?term={}'.format(keywords)\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n source = await response.json(encoding='utf8')\n\n source = json.dumps(source, indent=2)\n result = json.loads(str(source))\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***'.format(old_keyword),\n colour=0xf20006)\n embed.add_field(name='Word:', value='`{}`'.format(result['list'][0]['word']), inline=False)\n embed.add_field(name='Definition:', value='```{}```'.format(result['list'][0]['definition']), inline=False)\n embed.add_field(name='example:', value='```{}```'.format(result['list'][0]['example']), inline=True)\n embed.add_field(name='Author:', value='`{}`'.format(result['list'][0]['author']), inline=False)\n embed.add_field(name='Link:', value='{}'.format(result['list'][0]['permalink']), inline=False)\n embed.add_field(name='Likes:', value='\\U0001f44d `{}`'.format(result['list'][0]['thumbs_up']),\n inline=True)\n embed.add_field(name='Dislikes:', value='\\U0001f44e `{}`'.format(result['list'][0]['thumbs_down']),\n inline=True)\n\n\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['succes'])\n except Exception as e:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***\\n\\nNothing found :sailboat:'.format(old_keyword, self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['warning'])", "def rhymeWords(self, input_word):\n return self.app.get('/words/1.0/rhyme/' + input_word, follow_redirects=True, headers=self.headers)", "def test_word_info_bad_word(self):\n word = \"hdiasudhisuahdiasushdiaushdiaushdiasuhdisauh\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": None,\n \"defination\": None,\n \"antonyms\": None,\n \"examples\": None,\n \"pronounciation\": None,\n \"synonyms\": None\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def test_no_cross_codewording(self):\n 
self.bob.add_codeword('muppet', 'ranged')\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")", "async def with_code_header():\n return jsonify(language=request.headers.get(\"Lang\")), 203, {\"X\": 233}", "def get_fact_response():\n randomi = random.randint(1, 7)\n count=0\n sequema= SE_QUEMA\n for x in range(1, randomi):\n sequema += SE_QUEMA\n if (x%2)==0:\n sequema += \"<audio src='https://s3.amazonaws.com/papa-caliente/tictoc.mp3'/>\"\n speechOutput = LA_PAPA + sequema + SE_QUEMO + \"<break time='1.3s'/>\"+ HELP_REPROMPT\n\n return response(speech_response_ssml(speechOutput, False))", "def simulate_response(self, documents):", "def substantiate():", "def __call__(self, word):\n return self.parse_request(self.request(f\"https://www.dictionaryapi.com/api/v3/references/collegiate/json/{word}?key={self.apikey}\"), word)", "def main_response(self, data):", "def main_response(self, data):" ]
[ "0.5700817", "0.5638408", "0.5615774", "0.5550162", "0.55433565", "0.5511571", "0.54545414", "0.54381716", "0.54381716", "0.5403765", "0.5395792", "0.53828865", "0.5371855", "0.535695", "0.5337701", "0.53147566", "0.5295624", "0.52706325", "0.5251627", "0.5242964", "0.5225923", "0.5212517", "0.5199666", "0.51930124", "0.5183541", "0.51798844", "0.51785827", "0.5161006", "0.51595616", "0.51595616" ]
0.6663941
0
Can't oppose a fight in a sector you're not in
def test_no_oppose_different_sectors(self):
    battle = self.battle
    self.bob.sector = 7

    s1 = battle.create_skirmish(self.alice, 2)
    prev_skirmishes = self.sess.query(db.SkirmishAction).count()
    with self.assertRaises(db.WrongSectorException):
        s1.react(self.bob, 2)
    now_skirmishes = self.sess.query(db.SkirmishAction).count()
    self.assertEqual(prev_skirmishes, now_skirmishes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_support_different_sectors(self):\n battle = self.battle\n self.carol.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.carol, 2, hinder=False)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_cant_start_fight_in_sector_zero(self):\n battle = self.battle\n self.alice.sector = 0\n\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.NoSuchSectorException):\n battle.create_skirmish(self.alice, 2)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_allow_sector_movement_in_battle(self):\n self.conf[\"game\"][\"num_sectors\"] = 7\n self.conf[\"game\"][\"allow_sector_retreat\"] = True\n self.battle.create_skirmish(self.alice, 1)\n curr = self.alice.region\n\n self.alice.move(100, curr, 15, sector=3, conf=self.conf)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 1)", "def fight(self, combat_item):\r\n print(self.name + \" doesn't want to fight with you\")\r\n return True", "def combat(self, entite1, entite2, gameZone):\n degat = entite1.attaque*(entite1.vie//10)\n degat += randint(0, degat)\n degatContreAttaque = entite2.attaque*(entite2.vie//30)\n degatContreAttaque += randint(0, degatContreAttaque)*2\n entite1.setCanAttack(False)\n if entite2.vie - degat <= 0:\n #entite2 contre attaque et meurt\n entite2.parent.removeEntite(entite2)\n gameZone.supprimerEntite(entite2)\n else:\n #entite2 contre attaque et survie\n entite2.vie -= degat\n #contre attaque\n if entite1.vie - degatContreAttaque <= 0:\n #entite1 meurt\n if not entite1.nom == \"Archer\":\n entite1.parent.removeEntite(entite1)\n gameZone.supprimerEntite(entite1) \n else:\n #entite1 un prend des degats et survie\n if not entite1.nom == \"Archer\":\n entite1.vie -= degatContreAttaque", "async def fight(self, ctx):\n\n # init\n player = Player(ctx, self.client, ctx.message.author)\n checker = Fight_checker()\n\n # check if the player is in a fight\n if player.id in checker.in_fight:\n checker.in_fight.remove(player.id)\n\n await ctx.send(\"You are no longer in a fight.\")\n \n else:\n await ctx.send(\"You're not in a fight.\")", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def student_flee_attack(player: Character, opponent: Student):\n opponent.student_damage = opponent.get_student_flee_attack_damage()\n player.hp -= opponent.student_damage\n print(\"\\nStudent sucker punched you before you ran away!\")\n print(\"Student did \" + str(opponent.student_damage) + \" damage to \" + player.name.title())\n print(player.name.title() + \" has \" + str(player.hp) + \" HP left\\n\")", "def game(a,b, ):\n attacker, defender = a, b\n combatround = 0\n while a.hitpoints > 0 and b.hitpoints > 0:\n combatround += 1 # increase combatround by 1\n if a.stunned > 0:\n a.stunned -= 1\n if b.stunned > 0:\n b.stunned -= 1\n print()\n print(\"=================================\")\n print(\"combat round nr:\", combatround)\n print(\"attacker:\", attacker)\n print(\"defender:\", defender)\n print(\"=================================\")\n result = strike(attacker,defender)\n if result == None:\n break\n for line in result:\n print(line)\n if attacker == a and defender ==b:\n attacker, defender = b, a\n else:\n attacker, defender = a, b\n\n # game over \n 
print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n if a.hitpoints > b.hitpoints:\n victor = a.name\n elif b.hitpoints > a.hitpoints :\n victor = b.name\n else:\n print(\"it is a draw\")\n victor = None\n print(\"victor:\", victor)", "def attacker(self):\n step = None\n my_card = self.player.player_step(self.my_hand.get_hand())\n s = 0\n while True:\n self.my_hand.give(my_card, self.table)\n #print()\n print(\"TABLE\")\n print(self.table)\n print(\"#\" * 100)\n other_card = self.enemy.enemy_repel(self.table.get_hand()[s], self.other_hand.get_hand())\n if other_card != None:\n self.other_hand.give(other_card, self.table)\n print(self.table)\n print(\"#\" * 100)\n\n step = 1\n else:\n\n step = 0\n for i in range(len(self.table.get_hand())):\n # self.table.give(self.table.get_hand()[i], self.other_hand)\n self.other_hand.add(self.table.get_hand()[i])\n break\n print(\"Your hand\")\n print(self.my_hand)\n my_card = self.player.toss(self.table.get_hand(), self.my_hand.get_hand())\n if my_card == None:\n break\n s = s + 2\n if step == 1:\n #print()\n print(\"Successful defense\")\n else:\n print(\"To abandon the defense\")\n\n self.table.get_hand().clear()\n\n return step", "def enter_night_club(individual):\n if individual.age > LEGAL_DRINKING_AGE:\n print(\"Allowed to enter.\")\n else:\n print(\"Enterance of minors is denited.\")", "def team_battle(self):\n # deaths1 = 0\n # deaths2 = 0\n #\n # while deaths1 < len(self.team_one.heroes) and deaths2 < len(self.team_two.heroes):\n # self.team_one.attack(self.team_two)\n # self.team_two.attack(self.team_one)\n # if\n # print(\"hi\")\n while self.team_one.find_hero == True and self.team_two.find_hero == True:\n print(\"team_battle\")", "def fight(self, combat_item):\r\n if combat_item == self.weakness:\r\n print(\"You fend \" + self.name + \" off with the \" + combat_item )\r\n Character.victory_count +=1\r\n return True\r\n else:\r\n print(self.name + \" crushes you, puny adventurer\")\r\n return False", "def attack_countries(self, other_country):\n print(\"\\n>>>\\tEncounter enemy!\")\n my_point = self.get_total_cp(self.troop_list, other_country)\n enemy_point = other_country.get_total_cp(other_country.troop_list, self)\n if my_point <= 0:\n return False\n elif enemy_point <= 0:\n return True\n if my_point > enemy_point:\n for key in self.troop_list:\n if self.troop_list[key][0] > 0:\n if self.troop_list[key][0] >= 2:\n self.troop_list[key][0] -= 2\n if self.troop_list[key][1].health > 10:\n self.troop_list[key][1].health -= 10\n if self.troop_list[key][1].attack >= 5:\n self.troop_list[key][1].attack -= 5\n for key in other_country.troop_list:\n if other_country.troop_list[key][0] > 0:\n if other_country.troop_list[key][0] >= 3:\n other_country.troop_list[key][0] -= 3\n if other_country.troop_list[key][1].health > 15:\n other_country.troop_list[key][1].health -= 15\n if other_country.troop_list[key][1].attack >=10:\n other_country.troop_list[key][1].attack -= 10\n if enemy_point >= my_point:\n for key in self.troop_list:\n if self.troop_list[key][0] > 0:\n if self.troop_list[key][0] >= 3:\n self.troop_list[key][0] -= 3\n if self.troop_list[key][1].health > 15:\n self.troop_list[key][1].health -= 15\n if self.troop_list[key][1].attack >= 10:\n self.troop_list[key][1].attack -= 10\n for key in other_country.troop_list:\n if other_country.troop_list[key][0] > 0:\n if other_country.troop_list[key][0] >= 2:\n other_country.troop_list[key][0] -= 2\n if other_country.troop_list[key][1].health > 10:\n other_country.troop_list[key][1].health -= 10\n if 
other_country.troop_list[key][1].attack >= 5:\n other_country.troop_list[key][1].attack -= 5", "def test_failed_attack(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10\n s1.react(self.bob, 10) # --Attack 10\n s1.react(self.dave, 9) # --Attack 9\n\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.vp, 10)", "def check_student_death(opponent: Student):\n if opponent.hp <= 0:\n print(\"Student has died.\\n\\n\"\n \"One step closer to the co-op spot..\\n\"\n \"Let's keep moving.\\n\")", "def test_disallow_absent_fighting(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.region = londo\n self.sess.commit()\n\n with self.assertRaises(db.NotPresentException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def fight(you, boss):\n you_attack = you['damage'] - boss['armor']\n if you_attack < 1:\n you_attack = 1\n boss_attack = boss['damage'] - you['armor']\n if boss_attack < 1:\n boss_attack = 1\n boss_turns = np.ceil(you['hit']/boss_attack)\n you_turns = np.ceil(boss['hit']/you_attack)\n return you_turns <= boss_turns", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def cat_turn():\r\n\tglobal men\r\n\tl = [bat, pounce, legkick]\r\n\tx = randint(0, 3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn cat.hit(*l[x])\r\n\telse:\r\n\t\tmen += cat.sleep(*csleep)\r\n\t\treturn 0", "def defender(self):\n step = None\n other_card = self.enemy.enemy_step(self.other_hand.get_hand())\n s = 0\n while True:\n self.other_hand.give(other_card, self.table)\n #print()\n print(\"TABLE\")\n print(self.table)\n print(\"#\" * 100)\n my_card = self.player.player_repel(self.table.get_hand()[s], self.my_hand.get_hand())\n if my_card != None:\n self.my_hand.give(my_card, self.table)\n print(self.table)\n print(\"#\" * 100)\n\n step = 0\n else:\n\n step = 1\n for i in range(len(self.table.get_hand())):\n # self.table.give(self.table.get_hand()[i], self.my_hand)\n self.my_hand.add(self.table.get_hand()[i])\n break\n print(\"Your hand\")\n print(self.my_hand)\n other_card = self.enemy.toss(self.table.get_hand(), self.other_hand.get_hand())\n if other_card == None:\n break\n s = s + 2\n if step == 0:\n #print()\n print(\"Successful defense\")\n else:\n print(\"To abandon the defense\")\n\n self.table.get_hand().clear()\n\n return step", "def check_cheating(self, dice=[]):\n\n #Assume they're not cheating until proven guilty\n self.cheating = False\n\n if self.current_stage == 3:\n if self.die_a not in dice and (self.die_a.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True\n elif self.die_b not in dice and (self.die_b.value == 6):\n print(\"You're cheating! You cannot lock a 6! 
You cannot win \"\n \"until you reroll it!\")\n self.cheating = True", "async def fight(self, ctx):\r\n attacker = ctx.message.author.name\r\n defenders = ctx.message.mentions\r\n # only continue if valid attacker and defender\r\n attacker_ship = Ship.find_ship(attacker)\r\n if not attacker_ship:\r\n await ctx.send('{0}, you do not have a ship! `$ship` to get one'.format(ctx.message.author.mention))\r\n return\r\n if not defenders:\r\n await ctx.send('Who are you fighting? `$fight @user` to fight someone')\r\n # reset cooldowns when not successful fights\r\n # self.fight.reset_cooldown()\r\n return\r\n elif len(defenders) > 1:\r\n await ctx.send('Who are you fighting? One at a time (for now)')\r\n return\r\n else:\r\n defender = defenders[0].name\r\n\r\n if attacker == defender:\r\n attacker_ship.gold -= 50\r\n if attacker_ship.gold < 0:\r\n attacker_ship.gold = 0\r\n attacker_ship.update()\r\n await ctx.send('A mutiny has started on {0}\\'s ship! The treasure hold has been ransacked! '\r\n '{1} gold was taken.'.format(defender, 50))\r\n return\r\n\r\n defender_ship = Ship.find_ship(defender)\r\n if not defender_ship:\r\n await ctx.send('{0} does not have a ship! There are no fights'\r\n ' on the high sea if there are no ships to fight'.format(defender))\r\n return\r\n\r\n # actually start fight\r\n em = discord.Embed(title='{0} has attacked {1} :rage: '.format(attacker, defender), colour=0xDDDD00)\r\n\r\n # calculate who wins based on their attack and defense plus random number\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n attacker_msg = ''\r\n defender_msg = ''\r\n\r\n while attacker_ship.hull > 0 and defender_ship.hull > 0:\r\n attack = random.randint(1, 100)\r\n attack += attacker_ship.cannons + attacker_ship.crew\r\n\r\n defense = random.randint(1, 100)\r\n defense += defender_ship.cannons + defender_ship.crew\r\n\r\n defender_ship.damage_hull(attack)\r\n attacker_ship.damage_hull(defense)\r\n\r\n attacker_msg += 'Fired a volley of **{}** cannonballs <a:cannon:554558216889958400> \\n'.format(attack)\r\n defender_msg += '<a:cannon_reversed:554722119905181735> Return fired a volley of **{}** cannonballs \\n'.format(defense)\r\n\r\n\r\n\r\n if attacker_ship.hull > defender_ship.hull: # attacker wins\r\n # base gold at 100, more gold earned for harder fights, less or easier ones\r\n gold = 100 + (defender_ship.level() - attacker_ship.level()) * 2\r\n gold = gold if gold > 0 else 0\r\n attacker_ship.gold += gold\r\n attacker_ship.win += 1\r\n defender_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n\r\n em.add_field(name='{} is the winner! 
:crossed_swords:'.format(attacker),\r\n value='<a:treasure_chest:554730061463289857> They earned **{}** gold for their coffers.'.format(gold), inline=False)\r\n\r\n else: # defender wins\r\n defender_ship.win += 1\r\n attacker_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n em.add_field(name='{} is the winner! :shield:'.format(defender),\r\n value=' <a:armor:554559559545520128> Their ship survives to fight another day.', inline=False)\r\n\r\n await ctx.send(embed=em)", "def opponent_one_attack(opponent_one, opponent_two):\n dex_check_roll = roll_die(1, 20) # Roll a 1d20 to for Dexterity check\n if dex_check_roll > opponent_two['Dexterity']: # If greater than enemy Dexterity stat, do damage\n damage = class_hp(opponent_one['Class']) # Roll corresponding class die to determine damage\n print('You rolled a', dex_check_roll, 'and passed the Dexterity check. You did', damage, 'damage')\n opponent_two['HP'] -= damage # Replace opponent's HP value with the difference\n if opponent_two['HP'] <= 0: # If the HP is less than or equal to 0, then print death message\n print('Enemy has died')\n return opponent_two['HP'] # Return HP so combat_round function knows whether combat has ended\n else:\n print('Enemy now has', opponent_two['HP'], 'HP') # If enemy didn't die, then print remaining HP\n else:\n print('You rolled a', dex_check_roll, 'and failed the Dexterity check') # Prints failed dexterity check roll", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def __attack(self, target):\n attack_difference = (Warrior.attack(self, target))\n if attack_difference > 5:\n print(\"Second attack with ANGRY!\")\n Warrior.attack(self, target)\n return None", "def test_canceled_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 10) # Attack 10\n s1a = s1.react(self.bob, 8,\n troop_type=\"cavalry\") # --Attack 8 (12)\n s1a.react(self.alice, 6,\n troop_type=\"ranged\") # ----Attack 6 (9)\n s1.resolve()\n self.assertEqual(s1.victor, self.alice.team)\n self.assert_(s1.unopposed)\n\n # Should be 20 VP (double the 10 it'd ordinarily be worth)\n self.assertEqual(s1.vp, 20)", "def test_curse(self):\n self.plr.piles[Piles.DECK].set(\"Estate\")\n self.plr.gain_card(\"Cursed Village\")\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Curse\"])\n self.assertIn(\"Estate\", self.g.trashpile)", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")" ]
[ "0.6505528", "0.6414374", "0.62349933", "0.6039099", "0.6032844", "0.6025839", "0.5965786", "0.5951273", "0.59395003", "0.59286326", "0.5919667", "0.59127593", "0.59070486", "0.5892684", "0.58916175", "0.5877553", "0.5849762", "0.5848409", "0.5803564", "0.5768749", "0.57517856", "0.5744106", "0.5735788", "0.57219607", "0.5702687", "0.56970656", "0.5666984", "0.56659794", "0.5651566", "0.5635607" ]
0.6416511
1
Can't support a fight in a sector you're not in
def test_no_support_different_sectors(self): battle = self.battle self.carol.sector = 7 s1 = battle.create_skirmish(self.alice, 2) prev_skirmishes = self.sess.query(db.SkirmishAction).count() with self.assertRaises(db.WrongSectorException): s1.react(self.carol, 2, hinder=False) now_skirmishes = self.sess.query(db.SkirmishAction).count() self.assertEqual(prev_skirmishes, now_skirmishes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cant_start_fight_in_sector_zero(self):\n battle = self.battle\n self.alice.sector = 0\n\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.NoSuchSectorException):\n battle.create_skirmish(self.alice, 2)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def fight(self, combat_item):\r\n print(self.name + \" doesn't want to fight with you\")\r\n return True", "def choose_attack(self, board): \n raise Exception(\"Unimplemented\")", "def test_allow_sector_movement_in_battle(self):\n self.conf[\"game\"][\"num_sectors\"] = 7\n self.conf[\"game\"][\"allow_sector_retreat\"] = True\n self.battle.create_skirmish(self.alice, 1)\n curr = self.alice.region\n\n self.alice.move(100, curr, 15, sector=3, conf=self.conf)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 1)", "def test_disallow_fighting_retreat(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.move(100, londo, 60 * 60 * 24)\n\n with self.assertRaises(db.InProgressException):\n self.battle.create_skirmish(self.alice, 1)", "def test_disallow_absent_fighting(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.region = londo\n self.sess.commit()\n\n with self.assertRaises(db.NotPresentException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_no_oppose_different_sectors(self):\n battle = self.battle\n self.bob.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.bob, 2)\n\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "async def fight(self, ctx):\n\n # init\n player = Player(ctx, self.client, ctx.message.author)\n checker = Fight_checker()\n\n # check if the player is in a fight\n if player.id in checker.in_fight:\n checker.in_fight.remove(player.id)\n\n await ctx.send(\"You are no longer in a fight.\")\n \n else:\n await ctx.send(\"You're not in a fight.\")", "def test_failed_attack(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10\n s1.react(self.bob, 10) # --Attack 10\n s1.react(self.dave, 9) # --Attack 9\n\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.vp, 10)", "def test_bad_attack_types(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10 infantry\n s1.react(self.bob, 10, troop_type='ranged') # --Attack 10 ranged\n\n # Ranged should get a 50% penalty here, for a total of 10/2 = 5\n # So Alice should win by 5 despite lesser numbers\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)\n\n s2 = battle.create_skirmish(self.bob, 10, # attack 10 ranged\n troop_type='ranged')\n s2.react(self.alice, 10, troop_type='cavalry') # -- oppose 10 cavalry\n result = s2.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)\n\n s3 = battle.create_skirmish(self.carol, 10, # Attack 10 cavalry\n troop_type='cavalry')\n s3.react(self.bob, 10) # -- oppose 10 
infantry\n result = s3.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.carol.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)", "async def fight(self, ctx):\r\n attacker = ctx.message.author.name\r\n defenders = ctx.message.mentions\r\n # only continue if valid attacker and defender\r\n attacker_ship = Ship.find_ship(attacker)\r\n if not attacker_ship:\r\n await ctx.send('{0}, you do not have a ship! `$ship` to get one'.format(ctx.message.author.mention))\r\n return\r\n if not defenders:\r\n await ctx.send('Who are you fighting? `$fight @user` to fight someone')\r\n # reset cooldowns when not successful fights\r\n # self.fight.reset_cooldown()\r\n return\r\n elif len(defenders) > 1:\r\n await ctx.send('Who are you fighting? One at a time (for now)')\r\n return\r\n else:\r\n defender = defenders[0].name\r\n\r\n if attacker == defender:\r\n attacker_ship.gold -= 50\r\n if attacker_ship.gold < 0:\r\n attacker_ship.gold = 0\r\n attacker_ship.update()\r\n await ctx.send('A mutiny has started on {0}\\'s ship! The treasure hold has been ransacked! '\r\n '{1} gold was taken.'.format(defender, 50))\r\n return\r\n\r\n defender_ship = Ship.find_ship(defender)\r\n if not defender_ship:\r\n await ctx.send('{0} does not have a ship! There are no fights'\r\n ' on the high sea if there are no ships to fight'.format(defender))\r\n return\r\n\r\n # actually start fight\r\n em = discord.Embed(title='{0} has attacked {1} :rage: '.format(attacker, defender), colour=0xDDDD00)\r\n\r\n # calculate who wins based on their attack and defense plus random number\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n attacker_msg = ''\r\n defender_msg = ''\r\n\r\n while attacker_ship.hull > 0 and defender_ship.hull > 0:\r\n attack = random.randint(1, 100)\r\n attack += attacker_ship.cannons + attacker_ship.crew\r\n\r\n defense = random.randint(1, 100)\r\n defense += defender_ship.cannons + defender_ship.crew\r\n\r\n defender_ship.damage_hull(attack)\r\n attacker_ship.damage_hull(defense)\r\n\r\n attacker_msg += 'Fired a volley of **{}** cannonballs <a:cannon:554558216889958400> \\n'.format(attack)\r\n defender_msg += '<a:cannon_reversed:554722119905181735> Return fired a volley of **{}** cannonballs \\n'.format(defense)\r\n\r\n\r\n\r\n if attacker_ship.hull > defender_ship.hull: # attacker wins\r\n # base gold at 100, more gold earned for harder fights, less or easier ones\r\n gold = 100 + (defender_ship.level() - attacker_ship.level()) * 2\r\n gold = gold if gold > 0 else 0\r\n attacker_ship.gold += gold\r\n attacker_ship.win += 1\r\n defender_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n\r\n em.add_field(name='{} is the winner! 
:crossed_swords:'.format(attacker),\r\n value='<a:treasure_chest:554730061463289857> They earned **{}** gold for their coffers.'.format(gold), inline=False)\r\n\r\n else: # defender wins\r\n defender_ship.win += 1\r\n attacker_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n em.add_field(name='{} is the winner! :shield:'.format(defender),\r\n value=' <a:armor:554559559545520128> Their ship survives to fight another day.', inline=False)\r\n\r\n await ctx.send(embed=em)", "def fight(self, combat_item):\r\n if combat_item == self.weakness:\r\n print(\"You fend \" + self.name + \" off with the \" + combat_item )\r\n Character.victory_count +=1\r\n return True\r\n else:\r\n print(self.name + \" crushes you, puny adventurer\")\r\n return False", "def event11512150():\n header(11512150, 1)\n ally, = define_args('i')\n if_event_flag_on(1, EVENT.DarkAnorLondo)\n if_entity_attacked_by(1, ally, CHR.Player)\n if_condition_true(0, 1)\n wait(1.0) # You have to attack them twice.\n if_entity_attacked_by(0, ally, CHR.Player)\n chr.set_team_type(ally, TeamType.hostile_ally)", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def weapon_check():\n if get_locations()['player'] == get_locations()['weapon']:\n STATUS['weapon'] = 'armed'\n STATUS['locations']['weapon'] = None\n print(\"You found the weapon! Now go and kill the monster!\")", "def __attack(self, target):\n attack_difference = (Warrior.attack(self, target))\n if attack_difference > 5:\n print(\"Second attack with ANGRY!\")\n Warrior.attack(self, target)\n return None", "def squad_attacks(self, attacker, defender):\n\n for unit in attacker.units:\n if(len(defender.units) != 0):\n if(unit.hit):\n enemy = R.choice(defender.units)\n enemy.damage_inflicte(unit.beat(enemy))\n else:\n return", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! 
\", end='')\n show_health(health_meter)", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")", "def fight(who_fight=None):\r\n global monsters_defeated\r\n \r\n if isinstance(who_fight,helpful.Being):\r\n ###specific monster\r\n enemy = who_fight\r\n\r\n elif isinstance(who_fight,list):\r\n ###list of categories\r\n enemy = items_lists.random_monster(random.choice(who_fight))\r\n\r\n else:\r\n ###else picks a monster at random, not boss though\r\n enemy = items_lists.random_monster()\r\n \r\n\r\n\r\n # print 'fighting:\\n' + enemy.advanced_str()\r\n encountered = words.being_adj().capitalize() + ' ' + str(enemy)\r\n raw_input(str(player) + ' encounters a ' + encountered + '!\\n')\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n while choice == 'inventory':\r\n inspect_inventory()\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n if choice == 'yes':\r\n\r\n while enemy.get_health() > 0 and player.get_health() > 0:\r\n #player attacks\r\n item = helpful.pick_item(player.get_inventory(), 'What to use?')\r\n player.use(item)\r\n attack = item.get_damage()\r\n defend = item.get_health()\r\n\r\n if attack > 0:\r\n enemy.hit(item)\r\n raw_input('You dealt ' +str(attack) + ' damage!')\r\n elif defend > 0:\r\n raw_input('You gained ' + str(defend) + ' HP!')\r\n else:\r\n raw_input('That was pretty dumb.\\n')\r\n \r\n if enemy.get_health() > 0: #if the enemy is still alive\r\n\r\n ###enemy attacks, using random item in enemy's inventory\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n raw_input(str(enemy).capitalize() + ' used ' + str(enemy_choice) + '!\\n')\r\n raw_input('You lost ' + str(enemy_choice.get_damage()) + ' health!\\n')\r\n \r\n player.set_health(max(0,player.get_health())) #make health nonnegative\r\n enemy.set_health(max(0,enemy.get_health()))\r\n\r\n print('Player Health: ' + str(player.get_health()) + '\\n')\r\n raw_input(str(enemy) + ' Health: ' + str(enemy.get_health()) + '\\n')\r\n \r\n if enemy.get_health() == 0:\r\n winner = str(player)\r\n raw_input('You looted the following items:\\n' + enemy.get_inv_string())\r\n player.grab_items(enemy.get_inventory())\r\n result = 'win'\r\n monsters_defeated += 1\r\n\r\n if player.get_health() == 0:\r\n winner = str(enemy)\r\n result = 'death'\r\n\r\n print(winner + ' wins!\\n')\r\n\r\n elif choice == 'no':\r\n\r\n ouch = random.randrange(0,2)\r\n if enter_two == config.confus(config.config2):\r\n ouch = 0\r\n global cheated\r\n cheated = True\r\n print '<yolo>'\r\n if ouch:\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n print 'You got away, but were hit by the ' + \\\r\n str(enemy) +\"'s \" + str(enemy_choice) +'!' 
+ '\\n'\r\n raw_input('You sustained ' + str(enemy_choice.get_damage()) +' damage.\\n')\r\n if player.get_health() <= 0:\r\n return 'death'\r\n else:\r\n raw_input('You got away safely!\\n\\nThat was close!\\n')\r\n result = 'lose'\r\n\r\n return result", "async def attacking_logic(self):\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))", "def event_m20_11_x73(z52=_):\n \"\"\"State 0,1: Defeat determination\"\"\"\n IsChrDead(0, z52)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def checkhealth(currentstrength, currenthunger):\n global HUNGER\n global STRENGTH\n flash = False\n grizzly_text = \"\"\n\n if currentstrength <= 0:\n if FIGHT:\n if GRIZZLY_BEAR:\n grizzly_text = \"grizzly \"\n printmessage(\"The %sbear has killed you.\" % grizzly_text, 7, MAGENTA, 2)\n else:\n printmessage(\"You have died from severe exhaustion.\", 5, RED, 2)\n die('tooweak')\n\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n STRENGTH = STRENGTH_TEXT[i]\n if currentstrength > 99:\n STRENGTH = STRENGTH_TEXT[0]\n if currentstrength <= 19: \n flash = True\n update_strength(flash)\n flash = False # Make sure flash isnt incorrectly set for hunger too\n\n if currenthunger <= 0:\n printmessage(\"You have died from malnutrition.\", 5, RED, 2)\n die('starved')\n\n for i in range(0, 5): \n hungerrange = (79, 59, 39, 19, 0)\n if currenthunger in range(hungerrange[i], hungerrange[i] + 20): \n HUNGER = HUNGER_TEXT[i]\n if currenthunger > 99:\n HUNGER = HUNGER_TEXT[0]\n if currenthunger <= 19: \n flash = True\n update_hunger(flash)", "def student_flee_attack(player: Character, opponent: Student):\n opponent.student_damage = opponent.get_student_flee_attack_damage()\n player.hp -= opponent.student_damage\n print(\"\\nStudent sucker punched you before you ran away!\")\n print(\"Student did \" + str(opponent.student_damage) + \" damage to \" + player.name.title())\n print(player.name.title() + \" has \" + str(player.hp) + \" HP left\\n\")", "def attacker(self):\n step = None\n my_card = self.player.player_step(self.my_hand.get_hand())\n s = 0\n while True:\n self.my_hand.give(my_card, self.table)\n #print()\n print(\"TABLE\")\n print(self.table)\n print(\"#\" * 100)\n other_card = self.enemy.enemy_repel(self.table.get_hand()[s], self.other_hand.get_hand())\n if other_card != None:\n self.other_hand.give(other_card, self.table)\n print(self.table)\n print(\"#\" * 100)\n\n step = 1\n else:\n\n step = 0\n for i in range(len(self.table.get_hand())):\n # self.table.give(self.table.get_hand()[i], self.other_hand)\n self.other_hand.add(self.table.get_hand()[i])\n break\n print(\"Your hand\")\n print(self.my_hand)\n my_card = self.player.toss(self.table.get_hand(), self.my_hand.get_hand())\n if my_card == None:\n break\n s = s + 2\n if step == 1:\n #print()\n print(\"Successful defense\")\n else:\n print(\"To abandon the defense\")\n\n self.table.get_hand().clear()\n\n return step", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def chooseAttack(opponents_board):\r\n while True:\r\n guess = int(raw_input(\"choose a number between 0 through 8 to 
attack::\"))\r\n if guess < 0 or guess >8:\r\n continue\r\n result = checkIfHitOrMiss(guess, opponents_board)\r\n\r\n\r\n if result == \"hit\" or result == \"miss\":\r\n break\r\n\r\n if checkIfSunk(opponents_board):\r\n return \"sunk\"\r\n\r\n return result", "def check_cheating(self, dice=[]):\n\n #Assume they're not cheating until proven guilty\n self.cheating = False\n\n if self.current_stage == 3:\n if self.die_a not in dice and (self.die_a.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True\n elif self.die_b not in dice and (self.die_b.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True", "def _attack(self,target):\r\n damage = self.get_strength() * self.get_lvl()\r\n target.receive_damage(damage)", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)" ]
[ "0.6679059", "0.6258045", "0.6146013", "0.614008", "0.6102599", "0.6074899", "0.60706127", "0.6017862", "0.6010168", "0.5993785", "0.5981388", "0.5903833", "0.5761203", "0.5753857", "0.5733192", "0.57187116", "0.56712544", "0.5667836", "0.56673616", "0.5666955", "0.5656628", "0.5625253", "0.5619121", "0.5601515", "0.5592762", "0.5582794", "0.5579769", "0.5574884", "0.55740494", "0.5569545" ]
0.65712726
1
Sector zero isn't a valid place to fight
def test_cant_start_fight_in_sector_zero(self): battle = self.battle self.alice.sector = 0 prev_skirmishes = self.sess.query(db.SkirmishAction).count() with self.assertRaises(db.NoSuchSectorException): battle.create_skirmish(self.alice, 2) now_skirmishes = self.sess.query(db.SkirmishAction).count() self.assertEqual(prev_skirmishes, now_skirmishes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_support_different_sectors(self):\n battle = self.battle\n self.carol.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.carol, 2, hinder=False)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_allow_sector_movement_in_battle(self):\n self.conf[\"game\"][\"num_sectors\"] = 7\n self.conf[\"game\"][\"allow_sector_retreat\"] = True\n self.battle.create_skirmish(self.alice, 1)\n curr = self.alice.region\n\n self.alice.move(100, curr, 15, sector=3, conf=self.conf)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 1)", "def test_no_oppose_different_sectors(self):\n battle = self.battle\n self.bob.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.bob, 2)\n\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def _validate_scenario_against_sector(sector: SectorElement, scenario: dict):\n # TODO(rkm 2020-01-23) Additionally, since the coordinates are repeated in both\n # the sector and scenario definitions, we could also assert that the coordinates\n # for each fix match\n assert sector\n try:\n sector_fixes = list(sector.shape.fixes.keys())\n for aircraft in scenario[\"aircraft\"]:\n for fixName in [x[\"fixName\"] for x in aircraft[\"route\"]]:\n assert (\n fixName in sector_fixes\n ), f\"Fix {fixName} not in {sector_fixes}\"\n except AssertionError as e:\n return f\"Scenario not valid with the current sector: {e}\"", "def test_disallow_absent_fighting(self):\n londo = self.get_region(\"Orange Londo\")\n self.alice.region = londo\n self.sess.commit()\n\n with self.assertRaises(db.NotPresentException):\n self.battle.create_skirmish(self.alice, 1)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def has_sector(self,s):\n for run in self.byteruns():\n if run.has_sector(s): return True\n return False", "def check_for_missing_sector_data(df, target_sector_level):\n\n from flowsa.flowbyfunctions import replace_NoneType_with_empty_cells, replace_strings_with_NoneType\n\n # temporarily replace null values with empty cells\n df = replace_NoneType_with_empty_cells(df)\n\n activity_field = \"SectorProducedBy\"\n rows_lost = pd.DataFrame()\n cw_load = load_sector_length_crosswalk_w_nonnaics()\n for i in range(3, sector_level_key[target_sector_level]):\n # create df of i length\n df_subset = df.loc[df[activity_field].apply(lambda x: len(x) == i)]\n\n # import cw and subset to current sector length and target sector length\n\n nlength = list(sector_level_key.keys())[list(sector_level_key.values()).index(i)]\n cw = cw_load[[nlength, target_sector_level]].drop_duplicates()\n # add column with counts\n cw['sector_count'] = cw.groupby(nlength)[nlength].transform('count')\n\n # merge df & replace sector produced columns\n df_x = pd.merge(df_subset, cw, how='left', left_on=[activity_field], right_on=[nlength])\n df_x[activity_field]=df_x[target_sector_level]\n df_x= df_x.drop(columns=[nlength, target_sector_level])\n\n # calculate new flow amounts, based on sector count, allocating equally to the new sector length codes\n df_x['FlowAmount'] = 
df_x['FlowAmount'] / df_x['sector_count']\n df_x = df_x.drop(columns=['sector_count'])\n # replace null values with empty cells\n df_x = replace_NoneType_with_empty_cells(df_x)\n\n # append to df\n sector_list = df_subset[activity_field].drop_duplicates()\n if len(df_x) != 0:\n log.warning('Data found at ' + str(i) + ' digit NAICS to be allocated'\n ': {}'.format(' '.join(map(str, sector_list))))\n rows_lost = rows_lost.append(df_x, ignore_index=True, sort=True)\n\n if len(rows_lost) == 0:\n log.info('No data loss from NAICS in dataframe')\n else:\n log.info('Allocating FlowAmounts equally to each ' + target_sector_level)\n\n # add rows of missing data to the fbs sector subset\n df_allocated = pd.concat([df, rows_lost], ignore_index=True, sort=True)\n df_allocated = df_allocated.loc[df_allocated[activity_field].apply(lambda x: len(x)==sector_level_key[target_sector_level])]\n df_allocated.reset_index(inplace=True)\n\n # replace empty cells with NoneType (if dtype is object)\n df_allocated = replace_strings_with_NoneType(df_allocated)\n\n return df_allocated", "def test_privatize_fountain_card(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n\n gs = g\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.fountain_card = latrine\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.fountain_card, Card(-1))", "def find_sector(self, x, y):\n # Initialize the sector guess\n m = x.size\n x_pos_guess = (np.ones(m) * self.x_n / 2).astype(int)\n y_pos_guess = (np.ones(m) * self.y_n / 2).astype(int)\n\n # Define a function that checks whether a set of points violates a linear\n # boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),\n # where the latter is *COUNTER CLOCKWISE* from the former. 
Returns\n # 1 if the point is outside the boundary and 0 otherwise.\n def violation_check(\n x_check, y_check, x_bound_1, y_bound_1, x_bound_2, y_bound_2\n ):\n return (\n (y_bound_2 - y_bound_1) * x_check - (x_bound_2 - x_bound_1) * y_check\n > x_bound_1 * y_bound_2 - y_bound_1 * x_bound_2\n ) + 0\n\n # Identify the correct sector for each point to be evaluated\n these = np.ones(m, dtype=bool)\n max_loops = self.x_n + self.y_n\n loops = 0\n while np.any(these) and loops < max_loops:\n # Get coordinates for the four vertices: (xA,yA),...,(xD,yD)\n x_temp = x[these]\n y_temp = y[these]\n xA = self.x_values[x_pos_guess[these], y_pos_guess[these]]\n xB = self.x_values[x_pos_guess[these] + 1, y_pos_guess[these]]\n xC = self.x_values[x_pos_guess[these], y_pos_guess[these] + 1]\n xD = self.x_values[x_pos_guess[these] + 1, y_pos_guess[these] + 1]\n yA = self.y_values[x_pos_guess[these], y_pos_guess[these]]\n yB = self.y_values[x_pos_guess[these] + 1, y_pos_guess[these]]\n yC = self.y_values[x_pos_guess[these], y_pos_guess[these] + 1]\n yD = self.y_values[x_pos_guess[these] + 1, y_pos_guess[these] + 1]\n\n # Check the \"bounding box\" for the sector: is this guess plausible?\n move_down = (y_temp < np.minimum(yA, yB)) + 0\n move_right = (x_temp > np.maximum(xB, xD)) + 0\n move_up = (y_temp > np.maximum(yC, yD)) + 0\n move_left = (x_temp < np.minimum(xA, xC)) + 0\n\n # Check which boundaries are violated (and thus where to look next)\n c = (move_down + move_right + move_up + move_left) == 0\n move_down[c] = violation_check(\n x_temp[c], y_temp[c], xA[c], yA[c], xB[c], yB[c]\n )\n move_right[c] = violation_check(\n x_temp[c], y_temp[c], xB[c], yB[c], xD[c], yD[c]\n )\n move_up[c] = violation_check(\n x_temp[c], y_temp[c], xD[c], yD[c], xC[c], yC[c]\n )\n move_left[c] = violation_check(\n x_temp[c], y_temp[c], xC[c], yC[c], xA[c], yA[c]\n )\n\n # Update the sector guess based on the violations\n x_pos_next = x_pos_guess[these] - move_left + move_right\n x_pos_next[x_pos_next < 0] = 0\n x_pos_next[x_pos_next > (self.x_n - 2)] = self.x_n - 2\n y_pos_next = y_pos_guess[these] - move_down + move_up\n y_pos_next[y_pos_next < 0] = 0\n y_pos_next[y_pos_next > (self.y_n - 2)] = self.y_n - 2\n\n # Check which sectors have not changed, and mark them as complete\n no_move = np.array(\n np.logical_and(\n x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next\n )\n )\n x_pos_guess[these] = x_pos_next\n y_pos_guess[these] = y_pos_next\n temp = these.nonzero()\n these[temp[0][no_move]] = False\n\n # Move to the next iteration of the search\n loops += 1\n\n # Return the output\n x_pos = x_pos_guess\n y_pos = y_pos_guess\n return x_pos, y_pos", "def event_m20_11_x73(z52=_):\n \"\"\"State 0,1: Defeat determination\"\"\"\n IsChrDead(0, z52)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def sanity_check(self):\n for i in xrange(self.seats):\n ieat = self.states[i] == EATING\n leat = self.states[self.left(i)] == EATING\n reat = self.states[self.right(i)] == EATING\n assert(not(ieat and (leat or reat)))", "def test_miss(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(0, ship.check_hit((1, 1)))\n self.assertEqual(1, len(ship.location))", "def check_limits(self):\n\n #Find the relative position of each leg vs. 
its \"zero\" position\n relpos = self.fixed_plate - self.fixed_plate_zero\n\n for leg in range(3):\n #Check that the leg is within allowable \"safe zone\"\n #Use the position of the leg (relative to 0) to find the index in the \"safe zone\" matrix\n i_x = nearest_index(self.leg_safe_xaxis, relpos[COORD_X, leg])\n i_z = nearest_index(self.leg_safe_zaxis, relpos[COORD_Z, leg])\n #Look up in the safe zone.\n self.leg_fault[leg] = (not self.leg_safe_zone[leg, i_x, i_z])\n\n if (not all(np.isreal(self.fixed_plate[:, leg]))) or any(np.isnan(self.fixed_plate[:, leg])):\n #A complex or NaN value = the angle found for the leg was invalid, meaning that the\n #leg would have to be longer to reach the desired position.\n self.leg_fault[leg] = True", "def test_exceptions_optimal_sector(symbols, geometry, generators, num_electrons, msg_match):\n mol = qml.qchem.Molecule(symbols, geometry)\n hamiltonian = qml.qchem.diff_hamiltonian(mol)()\n\n with pytest.raises(ValueError, match=msg_match):\n optimal_sector(hamiltonian, generators, num_electrons)", "def test_disallow_retreat(self):\n self.battle.create_skirmish(self.alice, 1)\n londo = self.get_region(\"Orange Londo\")\n\n with self.assertRaises(db.InProgressException):\n self.alice.move(100, londo, 0)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 0)", "def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0", "def subSanity(self):\n\t\tself.sanity -= 1\n\t\tif self.sanity < -10:\n\t\t\tself.sanity = -10", "def validpositions(tile):\n if tile == 11 or tile == 21:\n valid_pos = \"n\"\n elif tile == 12:\n valid_pos = \"nes\"\n elif tile == 13:\n valid_pos = \"es\"\n elif tile == 22 or tile == 33:\n valid_pos = \"sw\"\n elif tile == 23:\n valid_pos = \"ew\"\n elif tile == 32:\n valid_pos = \"ns\"\n possible_directions(valid_pos)\n return valid_pos", "def solve_p2(lines: List[str]) -> int:\n for line in lines:\n room = Room(line)\n if room.is_valid():\n name = ShiftCipher.decode(room).lower()\n if re.match(r'north\\s*pole\\s*object', name):\n return room.sector_id\n return 0", "def check_dead(cart):\n id = cart_to_loc(cart)\n return voxel_data[id] == 0", "def test_failed_attack(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10\n s1.react(self.bob, 10) # --Attack 10\n s1.react(self.dave, 9) # --Attack 9\n\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.vp, 10)", "def check_stability(self, sup_section: list, pos_excavation: float,\r\n tunnel_diameter: float,\r\n cutting_length: int, rockmass_dict: dict) -> float:\r\n # if excavation is within supported area pf = always negative\r\n if sup_section[pos_excavation] == 1:\r\n pf = -1\r\n else:\r\n pf = self.face_pressure(tunnel_diameter, cutting_length,\r\n rockmass_dict)\r\n return pf", "def test_failed_draw_case():\n hands = draw_hands(4)[0]\n start_tile = pick_start_tile(hands)\n\n\n # testing that I'm handling weird start players right\n fail_draws = 0\n good_draws = 0\n for i in range(10000):\n hands = draw_hands(4)[0]\n start_tile = pick_start_tile(hands)[0]\n if start_tile is None:\n fail_draws += 1\n else:\n good_draws += 1\n\n return fail_draws, good_draws", "def is_valid(problem, i, j, e):\n row_map = row_maps[i]\n column_map = column_maps[j]\n sector_map = sector_maps[get_sector_number(i, 
j)]\n not_in_row = row_map[e-1] == 0\n not_in_column = column_map[e-1] == 0\n not_in_sector = sector_map[e-1] == 0\n\n return not_in_row and not_in_column and not_in_sector", "def isvalidposition(pdic,iprime,distance):\r\n \r\n # deal with base shifts \r\n distance = distance-2\r\n \r\n istforbidden = 0\r\n for o in range(-distance,distance+2,1):\r\n if (iprime+o in pdic):\r\n # E = end of orf\r\n # S = start of orf\r\n if((pdic[iprime+o]==\"E\") or (pdic[iprime+o]==\"S\")):\r\n if((o >3) or (o <-3)):\r\n pass\r\n else:\r\n istforbidden = 1\r\n break\r\n else:\r\n istforbidden = 1\r\n break\r\n else:\r\n pass\r\n \r\n return(istforbidden)", "def test_occupied1(self):\n array = np.array(['A'])\n self.assertFalse(self.tt.occupied(array, 0, 0))", "def can_we_change_ace():\n return s + Deck.ace.value[0] > 21 and added_ace_11 is True", "def fill_tank(self):\n print(\"This car has no fuel tank!\")", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def is_valid(self, move):\r\n return move > 10 and move < 89" ]
[ "0.66198516", "0.64547235", "0.6400996", "0.60747266", "0.5686613", "0.5592364", "0.5578621", "0.54812294", "0.5431174", "0.5384251", "0.53677344", "0.5342912", "0.5332669", "0.53319407", "0.53308547", "0.53085345", "0.52721715", "0.52696204", "0.52576536", "0.5248324", "0.5244763", "0.5235968", "0.5226477", "0.52241576", "0.5165334", "0.514115", "0.51284295", "0.51213646", "0.5104307", "0.50983787" ]
0.71960646
0
Make sure VP2.0 is working as intended
def test_vp_mark2(self): # Test of the VP system as outlined at http://redd.it/2k96il battle = self.battle s1 = battle.create_skirmish(self.bob, 30) # Attack with 30 -> 8vp s2 = s1.react(self.alice, 15, troop_type="cavalry") # Oppose with 30 -> 7vp s2.react(self.bob, 14) # Oppose with 14 result = s1.resolve() self.assertEqual(result.victor, self.bob.team) # Old way adds up VP, make sure that's not happening self.assertNotEqual(result.vp, 22) # New way only adds up VP for winning side # (8vp because the 15 in s2 was reduced to 8) self.assertEqual(result.vp, 8) self.assertEqual(result.vp, result.vp_for_team(self.bob.team)) # What if the other side had won? self.assertEqual(result.vp_for_team(self.alice.team), 14)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_going_down(self):", "def test_post_nveto_pmts(self):\n pass", "def has_vp(self):\n raise NotImplementedError", "def test_post_voltage_maps(self):\n pass", "def is_vp_on(self):\n raise NotImplementedError", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_paused(self):", "def check_stability(self):", "def test_determine_valid_virtualization_realms(self):\n pass", "def test_ipam_vrfs_update(self):\n pass", "def test_change_provisioned_throughput_usual_case():", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_restart(self):", "def test_get_nveto_pmts(self):\n pass", "def test_verify_sp_reinit(self):\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites(\n enterprise_style_flag=False)\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n vlan_1 = 42\n vlan_2 = '4094'\n vlan_3 = 4093\n pi_name = self.id() + '_physical_interface1'\n pi = PhysicalInterface(name=pi_name,\n parent_obj=pr_obj,\n ethernet_segment_identifier=esi_id)\n pi_uuid = self._vnc_lib.physical_interface_create(pi)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid)\n\n fabric_name = fabric_obj.get_fq_name()\n pi_fq_name = pi_obj.get_fq_name()\n\n # Create VPG\n vpg_name = \"vpg-1\"\n vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)\n vpg_uuid = self.api.virtual_port_group_create(vpg)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)\n vpg_name = vpg_obj.get_fq_name()\n\n # Create single VN\n vn1 = VirtualNetwork('vn1-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn1)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn1\n vmi_obj_1 = VirtualMachineInterface(self.id() + \"1\",\n parent_obj=proj_obj)\n vmi_obj_1.set_virtual_network(vn1)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name)\n\n vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_1.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(\n sub_interface_vlan_tag=vlan_1))\n vmi_uuid_1 = self.api.virtual_machine_interface_create(vmi_obj_1)\n vpg_obj.add_virtual_machine_interface(vmi_obj_1)\n self.api.virtual_port_group_update(vpg_obj)\n\n mock_zk = self._api_server._db_conn._zk_db\n # Verify if Znode are created for VMI1\n validation_node1 = os.path.join(\n _DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_uuid,\n 'virtual-network:%s' % vn1.uuid,\n 'vlan:%s' % vlan_1)\n # Read Znode\n znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)\n # Verify if correct Znodes are created\n assert znode_vmi_1_uuid == vmi_uuid_1, \\\n \"Znode for VMI_1 (%s) doesn't exist\" % vmi_uuid_1\n\n # Attach Second VMI with untagged vlan\n vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn2)\n\n # Create first untagged VMI and attach it to Virtual Port Group\n vmi_obj_2 = VirtualMachineInterface(self.id() + \"2\",\n parent_obj=proj_obj)\n vmi_obj_2.set_virtual_network(vn2)\n\n # Create KV_Pairs for this VMI with an untagged VLAN\n # If tor_port_vlan_id is set, then it signifies a untagged VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name,\n tor_port_vlan_id=vlan_2)\n\n vmi_obj_2.set_virtual_machine_interface_bindings(kv_pairs)\n vmi_uuid_2 = self.api.virtual_machine_interface_create(vmi_obj_2)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.add_virtual_machine_interface(vmi_obj_2)\n 
self.api.virtual_port_group_update(vpg_obj)\n\n # Verify if Znode are created for VMI2\n validation_node2 = os.path.join(\n _DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_uuid,\n 'untagged')\n\n # Read Znode\n znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)\n # Verify if correct Znodes are created\n assert znode_vmi_2_uuid == vmi_uuid_2, \\\n \"Znode for VMI_2 (%s) doesn't exist\" % vmi_uuid_2\n # Create another third VN with second tagged VMI\n vn3 = VirtualNetwork('vn3-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn3)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn3\n vmi_obj_3 = VirtualMachineInterface(self.id() + \"3\",\n parent_obj=proj_obj)\n vmi_obj_3.set_virtual_network(vn3)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name)\n\n vmi_obj_3.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_3.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(\n sub_interface_vlan_tag=vlan_3))\n vmi_uuid_3 = self.api.virtual_machine_interface_create(vmi_obj_3)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.add_virtual_machine_interface(vmi_obj_3)\n self.api.virtual_port_group_update(vpg_obj)\n\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n\n # Verify if Znode are created for VMI3\n validation_node3 = os.path.join(\n _DEFAULT_ZK_FABRIC_SP_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_uuid,\n 'virtual-network:%s' % vn3.uuid,\n 'vlan:%s' % vlan_3)\n\n # Read Znode\n znode_vmi_3_uuid = mock_zk._zk_client.read_node(validation_node3)\n # Verify if correct Znodes are created\n assert znode_vmi_3_uuid == vmi_uuid_3, \\\n \"Znode for VMI_3 (%s) doesn't exist\" % vmi_uuid_3\n\n # Delete all Znodes for VMI1, VMI2, VMI3\n mock_zk._zk_client.delete_node(validation_node1, True)\n mock_zk._zk_client.delete_node(validation_node2, True)\n mock_zk._zk_client.delete_node(validation_node3, True)\n\n # manually setting contrail_version to 21.4\n # so db_resync is run as part of upgrade scenario\n self._api_server._args.contrail_version = '21.4'\n\n self._api_server._db_conn._db_resync_done.clear()\n # API server DB reinit\n self._api_server._db_init_entries()\n self._api_server._db_conn.wait_for_resync_done()\n\n # Verify if Znodes are added back\n znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)\n znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)\n znode_vmi_3_uuid = mock_zk._zk_client.read_node(validation_node3)\n\n # Verify if correct Znodes are created\n assert znode_vmi_1_uuid == vmi_uuid_1, \\\n \"Znode for VMI_1 (%s) doesn't exist\" % vmi_uuid_1\n assert znode_vmi_2_uuid == vmi_uuid_2, \\\n \"Znode for VMI_2 (%s) doesn't exist\" % vmi_uuid_2\n assert znode_vmi_3_uuid == vmi_uuid_3, \\\n \"Znode for VMI_3 (%s) doesn't exist\" % vmi_uuid_3\n\n # Delete VMIs from VPG\n self.api.virtual_machine_interface_delete(id=vmi_uuid_1)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_2)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_3)\n self.api.virtual_port_group_delete(id=vpg_obj.uuid)\n self.api.physical_interface_delete(id=pi_uuid)\n self.api.physical_router_delete(id=pr_obj.uuid)\n self.api.fabric_delete(id=fabric_obj.uuid)\n # adding back zknode to original version\n # so other test cases runs from the begining\n mock_zk._zk_client.update_node(PATH_SYNC, '2011')", "def test_active_inference_SPM_1b(self):", "def test_pm_Completeness(self):\n 
pass", "def test_VoltageSourcePolarity(self):\n V=VoltageSource.VoltageSource(5,name=\"SRC\")\n\n gnd=AbsoluteVoltage.AbsoluteVoltage(0,name=\"gnd\")\n probe=VoltageProbe.VoltageProbe(name=\"probe\")\n\n V.connect(gnd,\"positive\")\n V.connect(probe,\"negative\")\n\n self.assertEqual( probe.getVoltageResistance(), (-5.0,0.0) )", "def test_uparforvarg(self):", "def test_subscriber_access_if_vsg2_goes_down(self):", "def version_check(self):\n # anchor_matcher --> matcher\n if hasattr(self, \"anchor_matcher\"):\n self.matcher = self.anchor_matcher\n if hasattr(self, \"head_in_features\"):\n self.in_features = self.head_in_features\n if hasattr(self, \"test_topk_candidates\"):\n self.topk_candidates = self.test_topk_candidates\n if hasattr(self, \"test_score_thresh\"):\n self.score_threshold = self.test_score_thresh", "def _verify(self):\n pass", "def bm_and_dvr_supported(self):", "def test_vargs(self):", "def test_4_4_1_1(self):\n pass", "def test_gre_l2(self):\n\n #\n # Add routes to resolve the tunnel destinations\n #\n route_tun1_dst = VppIpRoute(\n self,\n \"2.2.2.2\",\n 32,\n [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index)],\n )\n route_tun2_dst = VppIpRoute(\n self,\n \"2.2.2.3\",\n 32,\n [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index)],\n )\n\n route_tun1_dst.add_vpp_config()\n route_tun2_dst.add_vpp_config()\n\n #\n # Create 2 L2 GRE tunnels and x-connect them\n #\n gre_if1 = VppGreInterface(\n self,\n self.pg0.local_ip4,\n \"2.2.2.2\",\n type=(VppEnum.vl_api_gre_tunnel_type_t.GRE_API_TUNNEL_TYPE_TEB),\n )\n gre_if2 = VppGreInterface(\n self,\n self.pg0.local_ip4,\n \"2.2.2.3\",\n type=(VppEnum.vl_api_gre_tunnel_type_t.GRE_API_TUNNEL_TYPE_TEB),\n )\n gre_if1.add_vpp_config()\n gre_if2.add_vpp_config()\n\n gre_if1.admin_up()\n gre_if2.admin_up()\n\n self.vapi.sw_interface_set_l2_xconnect(\n gre_if1.sw_if_index, gre_if2.sw_if_index, enable=1\n )\n self.vapi.sw_interface_set_l2_xconnect(\n gre_if2.sw_if_index, gre_if1.sw_if_index, enable=1\n )\n\n #\n # Send in tunnel encapped L2. 
expect out tunnel encapped L2\n # in both directions\n #\n tx = self.create_tunnel_stream_l2o4(self.pg0, \"2.2.2.2\", self.pg0.local_ip4)\n rx = self.send_and_expect(self.pg0, tx, self.pg0)\n self.verify_tunneled_l2o4(self.pg0, rx, tx, self.pg0.local_ip4, \"2.2.2.3\")\n\n tx = self.create_tunnel_stream_l2o4(self.pg0, \"2.2.2.3\", self.pg0.local_ip4)\n rx = self.send_and_expect(self.pg0, tx, self.pg0)\n self.verify_tunneled_l2o4(self.pg0, rx, tx, self.pg0.local_ip4, \"2.2.2.2\")\n\n self.vapi.sw_interface_set_l2_xconnect(\n gre_if1.sw_if_index, gre_if2.sw_if_index, enable=0\n )\n self.vapi.sw_interface_set_l2_xconnect(\n gre_if2.sw_if_index, gre_if1.sw_if_index, enable=0\n )\n\n #\n # Create a VLAN sub-interfaces on the GRE TEB interfaces\n # then x-connect them\n #\n gre_if_11 = VppDot1QSubint(self, gre_if1, 11)\n gre_if_12 = VppDot1QSubint(self, gre_if2, 12)\n\n # gre_if_11.add_vpp_config()\n # gre_if_12.add_vpp_config()\n\n gre_if_11.admin_up()\n gre_if_12.admin_up()\n\n self.vapi.sw_interface_set_l2_xconnect(\n gre_if_11.sw_if_index, gre_if_12.sw_if_index, enable=1\n )\n self.vapi.sw_interface_set_l2_xconnect(\n gre_if_12.sw_if_index, gre_if_11.sw_if_index, enable=1\n )\n\n #\n # Configure both to pop thier respective VLAN tags,\n # so that during the x-coonect they will subsequently push\n #\n self.vapi.l2_interface_vlan_tag_rewrite(\n sw_if_index=gre_if_12.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1, push_dot1q=12\n )\n self.vapi.l2_interface_vlan_tag_rewrite(\n sw_if_index=gre_if_11.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1, push_dot1q=11\n )\n\n #\n # Send traffic in both directiond - expect the VLAN tags to\n # be swapped.\n #\n tx = self.create_tunnel_stream_vlano4(\n self.pg0, \"2.2.2.2\", self.pg0.local_ip4, 11\n )\n rx = self.send_and_expect(self.pg0, tx, self.pg0)\n self.verify_tunneled_vlano4(self.pg0, rx, tx, self.pg0.local_ip4, \"2.2.2.3\", 12)\n\n tx = self.create_tunnel_stream_vlano4(\n self.pg0, \"2.2.2.3\", self.pg0.local_ip4, 12\n )\n rx = self.send_and_expect(self.pg0, tx, self.pg0)\n self.verify_tunneled_vlano4(self.pg0, rx, tx, self.pg0.local_ip4, \"2.2.2.2\", 11)\n\n #\n # Cleanup Test resources\n #\n gre_if_11.remove_vpp_config()\n gre_if_12.remove_vpp_config()\n gre_if1.remove_vpp_config()\n gre_if2.remove_vpp_config()\n route_tun1_dst.add_vpp_config()\n route_tun2_dst.add_vpp_config()", "def verify(self):", "def test_vggmini_visualize(self):\n\t\tpass", "def test_pvid_vf_tx(self):\n random_vlan = random.randint(1, MAX_VLAN)\n\n self.dut.send_expect(\n \"ip link set %s vf 0 vlan %d\" % (self.host_intf0, random_vlan), \"# \")\n out = self.dut.send_expect(\"ip link show %s\" % self.host_intf0, \"# \")\n self.verify(\"vlan %d\" %\n random_vlan in out, \"Failed to add pvid on VF0\")\n\n self.vm0_dut_ports = self.vm_dut_0.get_ports('any')\n\n self.vm0_testpmd = PmdOutput(self.vm_dut_0)\n if self.kdriver == \"i40e\":\n self.vm0_testpmd.start_testpmd(VM_CORES_MASK, '--crc-strip')\n else:\n self.vm0_testpmd.start_testpmd(VM_CORES_MASK)\n self.vm0_testpmd.execute_cmd('set fwd mac')\n self.vm0_testpmd.execute_cmd('start')\n\n pkt = Packet(pkt_type='UDP')\n pkt.config_layer('ether', {'dst': self.vf1_mac})\n inst = sniff_packets(self.tester_intf0, timeout=5)\n pkt.send_pkt(tx_port=self.tester_intf1)\n pkts = load_sniff_packets(inst)\n\n self.verify(len(pkts), \"Not receive expected packet\")\n self.vm0_testpmd.quit()\n\n # disable pvid\n self.dut.send_expect(\n \"ip link set %s vf 0 vlan 0\" % (self.host_intf0), \"# \")", "def test_vw_controller(self):\n pass\n\n 
yarp.Network.init()\n\n pose_stream = yarp.BufferedPortBottle()\n pose_stream.open(\"/morse/test/pose/in\")\n yarp.Network.connect(\"/morse/robots/ATRV/Pose/out\", \"/morse/test/pose/in\")\n\n cmd_stream = yarp.BufferedPortBottle()\n cmd_stream.open(\"/morse/test/vw/out\")\n yarp.Network.connect(\"/morse/test/vw/out\", \"/morse/robots/ATRV/Motion_Controller/in\")\n \n # Read the start position, it must be (0.0, 0.0, 0.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n\n send_speed(cmd_stream, 1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 1.0, -math.pi/4.0, 2.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 0.5, -math.pi/8.0, 12.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -2.0, math.pi/2.0, 3.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n yarp.Network.fini()", "def test_firmware_version(self):\n self._verify_firmware_version()", "def verify(self):\r\n pass" ]
[ "0.61182004", "0.60457355", "0.59960026", "0.5870055", "0.58510983", "0.5850618", "0.58469063", "0.5836071", "0.57391274", "0.5677977", "0.5674923", "0.56707656", "0.56542206", "0.5651678", "0.56377363", "0.5611573", "0.56004316", "0.55828303", "0.55753917", "0.5574288", "0.55407465", "0.5535624", "0.55020124", "0.550134", "0.5484409", "0.54843235", "0.54759854", "0.5462552", "0.5457496", "0.54570717" ]
0.610728
1
Buffs should expire during update
def test_buff_expiration(self): sess = self.sess battle = self.battle # For the buff to work, alice needs to own this region battle.region.owner = self.alice.team sess.commit() s1 = battle.create_skirmish(self.alice, 30) # Attack 30 infantry s1.react(self.bob, 30) # -- oppose 30 infantry s1.react(self.dave, 4) # -- oppose 4 infantry result = s1.resolve() self.assertEqual(result.victor, self.bob.team) self.assertEqual(result.margin, 4) self.assertEqual(result.vp, 30) s2 = battle.create_skirmish(self.bob, 29) # Attack with 29 infantry s2.react(self.alice, 29) # -- oppose with 29 infantry s2.react(self.carol, 2) # -- oppose with 2 result = s2.resolve() self.assertEqual(result.victor, self.alice.team) self.assertEqual(result.margin, 2) self.assertEqual(result.vp, 29) # Bob's winning this, but wait! A buff that expires immediately! buff = db.Buff.otd(expiration=-30) battle.region.buff_with(buff) # One buff should exist in DB self.assertEqual(sess.query(db.Buff).count(), 1) db.Buff.update_all(sess) # Now it should be gone due to expiration self.assertEqual(sess.query(db.Buff).count(), 0) self.end_battle() # Bob wins because buff expired, 30 to 29 self.assertEqual(self.battle.victor, self.bob.team) # score1 is the score for team 1 self.assertEqual(self.battle.score1, 30) # score0 should not include the buff self.assertEqual(self.battle.score0, 29)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_expire_ban(self):\n pass", "def _do_expire(self):\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n memory = self.memory.copy()\n self.memory.clear()\n for key, val in memory.items():\n ip = key[0]\n if ip in self.live_servers and val.is_expired:\n # Decrease total connection for that server\n self.total_connection[ip] -= 1\n if not val.is_expired:\n self.memory[key] = val", "def on_expire(self):\n pass", "def update(self):\n if not self.exists:\n return\n if AT.TIME_TO_EXPIRE in self.attributes:\n if not self.calculate_time_left():\n self.fire_trigger(TR.TIME_EXPIRED)", "def update(self):\n super().update()\n self.checkTimeToLive()", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def update(self):\n now = farc.Framework._event_loop.time()\n # Collect and prune expired neighbors\n expired_ngbrs = []\n for ngbr_addr, ngbr_data in self._ngbrs.items():\n frame = ngbr_data[\"BCN_FRAME\"]\n rx_time = frame.rx_meta[0]\n if now > rx_time + self._EXPIRATION_PRD:\n expired_ngbrs.append(ngbr_addr)\n for ngbr_addr in expired_ngbrs:\n del self._ngbrs[ngbr_addr]", "def LocalUpdate(self):\n\n # Get current timestamp in miliseconds from unix epoch\n t = int(time.time() * 1000)\n\n # Number of times refill has occured\n lstrefil = self.status['timestamp'] - (60000 - self.status['refillIn'])\n nrefil = (t - lstrefil) / 60000.0\n\n if nrefil > 1:\n self.status['tokensLeft'] += self.status['refillRate'] * \\\n int(nrefil)\n\n if self.status['tokensLeft'] > 60 * self.status['refillRate']:\n self.status['tokensLeft'] = 60 * self.status['refillRate']\n\n # Update timestamps\n self.status['timestamp'] = t\n self.status['refillIn'] = int((1 - nrefil % 1) * 60000)", "def test_remove_expired(self):\n req1 = FakeRequest(1, True)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, True)\n req5 = FakeRequest(5, False)\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.remove_expired()\n\n self.assertTrue(\n req2 in self.request_buffer.requests and\n req5 in self.request_buffer.requests\n )", "def test_update_ban(self):\n pass", "def testUpdatingAfterCaching(self):\n valid_period = datetime.timedelta(2, 4, 6)\n cached_list_logic.setCacheItems(\n 'test_list', [{KEY: 'foo'}, {KEY: 'bar'}], valid_period)\n cached_list = cached_list_model.CachedList.get_by_id('test_list')\n\n self.assertAlmostEqual(cached_list.valid_through,\n datetime.datetime.now() + valid_period,\n delta=datetime.timedelta(seconds=5))\n\n self.assertFalse(cached_list.is_processing)", "def _expire(self):\n del self.map.addr[self.name]\n self.map.notify(\"addrmap_expired\", *[self.name], **{})", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert 
memcache.get('bye') == None", "async def test_age_limit_expiry(hass: HomeAssistant) -> None:\n now = dt_util.utcnow()\n current_time = datetime(now.year + 1, 8, 2, 12, 23, tzinfo=dt_util.UTC)\n\n with freeze_time(current_time) as freezer:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 20,\n \"max_age\": {\"minutes\": 4},\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_NUMERIC:\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n # After adding all values, we should only see 5 values in memory\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-5:]) / len(VALUES_NUMERIC[-5:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(5 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1.0\n\n # Values expire over time. Only two are left\n\n current_time += timedelta(minutes=3)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-2:]) / len(VALUES_NUMERIC[-2:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(2 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1 / 4\n\n # Values expire over time. Only one is left\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = float(VALUES_NUMERIC[-1])\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(1 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 0\n\n # Values expire over time. 
Buffer is empty\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == STATE_UNKNOWN\n assert state.attributes.get(\"buffer_usage_ratio\") == round(0 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") is None", "async def afterHoursAutoPurge(self, ctx: Context):", "def last_buy(self):\n multi_data = []\n while not self.infoQueue.empty():\n multi_data.append(self.infoQueue.get_nowait())\n self.redisHandle.set_multiple_data(multi_data)\n print(\"flush all data\")", "def __remove_expired_freezers(self, event: Event):\n if len(self.__freeze_map) == 0:\n # freeze option disabled\n return False\n self.__active_freezers = [freezer for freezer in self.__active_freezers\n if event.max_timestamp - freezer.min_timestamp <= self._pattern.window]", "def recache_updates(self):\n ks = ['BooksUpdated', '%s'%self.key().id()]\n decaches(ks)", "def _update(self, count=True, forced=False):", "def testDirtyRefresh(self):\n \n pass", "def on_expire(self, *args):\n\t\traise NotImplementedError", "def is_safe_cache(self):\n if self.get_last_update() > self.timestamp:\n return False\n return True", "def _drop_old_data(self, current_time):\n for k in self._buf.keys():\n timelimit = current_time - self._lifetime\n if (k < timelimit):\n del self._buf[k]", "def evict_or_add (self, item):", "def do_expire(self):\n # Deep copy to avoid RuntimeError: dictionary changed size during iteration\n _timeouts = deepcopy(self.timeouts)\n for key, value in _timeouts.items():\n if value - self.clock.now() < timedelta(0):\n del self.timeouts[key]\n # removing the expired key\n if key in self.redis:\n self.redis.pop(key, None)", "def expire(self):\n if not self.has_connection():\n self.generate_connection()\n self.connection.expire_hit(self.mturk_id)\n self.update()", "def invalidate_for(self):\r\n return None", "def updated(self):\n return self.expires != self.orig_expires", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def _expire(self):\n with self._lock:\n self._items.popleft()" ]
[ "0.6242827", "0.5879021", "0.57970774", "0.5760357", "0.5757112", "0.56892097", "0.5640932", "0.5618423", "0.55453074", "0.5532479", "0.55143934", "0.5485789", "0.5456731", "0.5439161", "0.54201627", "0.54194653", "0.5411782", "0.54014707", "0.53992915", "0.53857243", "0.53253543", "0.53195727", "0.53178704", "0.53076065", "0.53028363", "0.53011566", "0.5298054", "0.52964425", "0.52925986", "0.5277297" ]
0.66497093
0
Make sure orangered victories actually count
def test_orangered_victory(self):
    self.assertEqual(None, self.sapphire.owner)
    sess = self.sess
    self.battle.create_skirmish(self.alice, 5)
    self.battle.ends = self.battle.begins
    sess.commit()
    updates = Battle.update_all(sess)
    sess.commit()

    self.assertNotEqual(len(updates['ended']), 0)
    self.assertEqual(updates["ended"][0], self.battle)
    self.assertEqual(0, self.sapphire.owner)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_violence(self):\n # First time offender get registered in the system and changes category into an Aggressor and a Victim\n if self.assaulted == 0:\n if self.stress > self.random.random():\n self.category = 'aggressor'\n self.assaulted += 1\n self.spouse.category = 'victim'\n self.spouse.got_attacked += 1\n\n # Second-time offender, checks to see if it is a recidivist.\n elif self.stress > self.random.random():\n self.assaulted += 1\n self.spouse.got_attacked += 1", "def CheckVictoryCondition(self):\n opponentVictory = True\n for char in self.screen.characters:\n if char.team == 1 and char.leader and not char.dead:\n opponentVictory = False\n if opponentVictory:\n self.screen.refresh()\n self.music.stop()\n sys.exit()\n\n for victory in self.victories:\n playerVictory = True\n nextLevel = victory['next_level']\n if victory['condition'] == 'destroy':\n for char in self.screen.characters:\n if not char.dead and char.team == 2:\n playerVictory = False\n elif victory['condition'] == 'kill leaders':\n for char in self.screen.characters:\n if not char.dead and char.team == 2 and char.leader:\n playerVictory = False\n if playerVictory:\n print('You win')\n if self.music:\n self.music.stop()\n self.screen.objects = []\n self.screen.tileEffects = []\n self = Level(self.screen, nextLevel)", "def _update_suspicion_0(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_0(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_killed_players_eject_count(self, players_to_update):\n for player in players_to_update:\n player.times_ejected += 1", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def test_get_damage_out_of_limit(self):\n self.veh.health = 0.24\n for op in self.veh.operators:\n op.health = 0.1\n self.veh.get_damage(0.5)\n self.assertEqual(self.veh.health, 0)\n self.assertEqual(self.veh.operators[0].health, 0.05)\n self.assertEqual(self.veh.operators[1].health, 0.05)", "def check_collisions(self):", "def get_marble_count(self):", "def get_status(self):\r\n return Character.victory_count", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_2(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def animal_eats(self):\n self.update_fodder()\n self.herbivore_eats()\n self.carnivore_eats()", "def is_hungry(self) -> bool:\n if self.eat_count <= 3:\n return True\n else:\n return False", "def crates_destroyed(self, game_state: dict):\n\n bomb_position_x = game_state['self'][3][0]\n bomb_position_y = game_state['self'][3][1]\n n_crates = 0\n\n for i in range(3):\n if bomb_position_x - i - 1 >= 0:\n if game_state['field'][bomb_position_x - i - 1][bomb_position_y] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x - i - 1][bomb_position_y] == -1:\n break\n\n for i in range(3):\n if bomb_position_x + i + 1 <= 16:\n if game_state['field'][bomb_position_x + i + 1][bomb_position_y] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x + i + 1][bomb_position_y] == -1:\n break\n\n for i in 
range(3):\n if bomb_position_y - i - 1 >= 0:\n if game_state['field'][bomb_position_x][bomb_position_y - i - 1] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x][bomb_position_y - i - 1] == -1:\n break\n\n for i in range(3):\n if bomb_position_y + i + 1 <= 16:\n if game_state['field'][bomb_position_x][bomb_position_y + i + 1] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x][bomb_position_y + i + 1] == -1:\n break\n\n return n_crates", "def addSanity(self):\n\t\tself.sanity += 1\n\t\tif self.sanity > 10:\n\t\t\tself.sanity = 10", "def num_bad_votes(self):\n return self.qualities.filter(correct=False).count()", "def test_get_damage_out_of_limit(self):\n self.sold.health = 0.2\n self.sold.get_damage(0.32)\n self.assertEqual(self.sold.health, 0)", "def setCaptured(self):\r\n total = 0\r\n for x in self.animals:\r\n if x.captured == True:\r\n total += 1\r\n self.captured = total", "def checkForPickup(self):\n if self.counter == 0:\n if self.game.player.reticule in self.overlapping_sprites and (games.keyboard.is_pressed(games.K_a) \\\n or games.keyboard.is_pressed(games.K_d)):\n self.counter = 15\n if self.held == 0:\n self.game.player.held_item = self\n self.held = 1\n self.y = self.game.player.y\n else:\n self.game.player.held_item = None\n self.held = 0", "def after_attack(self):\n for key in self.troop_list:\n self.troop_list[key][1].health += (5 * self.troop_list['Priest'][0])\n self.troop_list[key][1].attack += (5 * self.troop_list['BlackSmith'][0])", "def game_over(self):\n return self.lives() < 0", "def hives_count(self) -> int:\n return self.hives.count()", "def inhabitant_check(self):\n\t\tchanged = False\n\t\tif self.happiness > self.__get_data(\"happiness_inhabitants_increase_requirement\") and \\\n\t\t\t self.inhabitants < self.inhabitants_max:\n\t\t\tself.inhabitants += 1\n\t\t\tchanged = True\n\t\t\tself.log.debug(\"%s: inhabitants increase to %s\", self, self.inhabitants)\n\t\telif self.happiness < self.__get_data(\"happiness_inhabitants_decrease_limit\") and \\\n\t\t self.inhabitants > 1:\n\t\t\tself.inhabitants -= 1\n\t\t\tchanged = True\n\t\t\tself.log.debug(\"%s: inhabitants decrease to %s\", self, self.inhabitants)\n\n\t\tif changed:\n\t\t\t# see http://wiki.unknown-horizons.org/index.php/DD/Economy/Supplying_citizens_with_resources\n\t\t\tself.alter_production_time( 1 + (float(self.inhabitants)/10))\n\t\t\tself._changed()", "def flaky_count(self) -> int:\n return pulumi.get(self, \"flaky_count\")", "def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1", "def take_attendance():\n\t\tcount = 0\n\t\tfor person in Simulation.community:\n\t\t\tif Simulation.community[person].went_to_bar():\n\t\t\t\tcount += 1\n\t\tprint(count)\n\t\tStrategy.evalScore(count)\n\t\tSimulation.eval_randoms(count)\n\t\tSimulation.add_to_memory(count)", "def update(self):\n\n virusesCopy = self.getViruses()\n for v in virusesCopy : \n if v.doesClear() == True:\n self.getViruses().remove(v)\n \n newDensity = len(self.getViruses())/self.getMaxPop()\n \n survivingVirusesCopy = self.getViruses()[:]\n \n for v in survivingVirusesCopy : \n try:\n self.getViruses().append(v.reproduce(newDensity,self.getPrescriptions()))\n newDensity = len(self.getViruses())/self.getMaxPop()\n except NoChildException: \n continue\n return len(self.getViruses())", "def test_UpdateHealthLessThan0 (self) :\n\t\t\n\t\tself.person3.updateHealth 
()\n\t\tself.assertEqual(self.person3.getHealth(), \\\n\t\t100 + self.healthEffect2)\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), 0)", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives" ]
[ "0.63099396", "0.59010875", "0.58280236", "0.5823622", "0.5699804", "0.5681795", "0.5663191", "0.5559387", "0.5555499", "0.5537396", "0.55281454", "0.55118805", "0.5493975", "0.54680467", "0.5466667", "0.5458474", "0.5455572", "0.54442436", "0.5437801", "0.5426647", "0.54130894", "0.54103935", "0.5399988", "0.539937", "0.5397396", "0.5384799", "0.5382693", "0.53793746", "0.53745174", "0.5365347" ]
0.6376024
0
Make sure homeland defense buffs work properly
def test_homeland_defense(self):
    self.conf["game"]["homeland_defense"] = "100/50/25"
    self.assertEqual(None, self.sapphire.owner)
    battle = self.battle
    sess = self.sess

    # Skirmish 1
    s1 = battle.create_skirmish(self.alice, 10)   # Attack 10
    s1a = s1.react(self.carol, 4, hinder=False)   # --Support 4
    s1a.react(self.bob, 3)                        # ----Attack 3
    s1.react(self.dave, 8)                        # --Attack 8
    # Winner will be team orangered, 11 VP

    # Skirmish 2
    battle.create_skirmish(self.bob, 5)           # Attack 5
    # Winner will be team periwinkle, 10 VP for unopposed

    # End fight
    self.battle.ends = self.battle.begins
    sess.commit()
    self.assert_(battle.past_end_time())
    updates = Battle.update_all(sess, conf=self.conf)
    sess.commit()

    # Overall winner would ordinarily be orangered, but this should get
    # modified by the homeland defense buffs - Sapphire is right next to
    # the test capital, and so will get an extra 50%
    self.assertEqual(battle.victor, 1)
    self.assertEqual(battle.score1, 15)
    # OR should get 25% bonus
    self.assertEqual(battle.score0, 13)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_buff_nostacking(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 20) # Attack 20 infantry\n s1.react(self.bob, 20) # -- oppose 20 infantry\n s1.react(self.dave, 6) # -- oppose 6 infantry\n\n s1.buff_with(db.Buff.first_strike())\n s1.buff_with(db.Buff.first_strike())\n\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 1)\n self.assertEqual(result.vp, 20)", "def bomb_defused(event_var):\r\n debug.write(\"[SourceRPG] Handling bomb_defused\", 1)\r\n if isFairForTeam(event_var['es_userteam']) or not int(unfairAdvantage):\r\n if es.isbot(event_var['userid']) and not int(botsGetXpOnEvents):\r\n return\r\n player = players[event_var['userid']]\r\n player.addXp( int(bombDefuseXp) * player['level'], 'defusing the bomb' )\r\n debug.write(\"[SourceRPG] bomb_defused handled\", 1)", "def tick(self):\n\n commander = self.commander\n our_flag = commander.game.team.flag.position\n targetLocation = commander.game.team.flagScoreLocation\n\n if self.defender and (self.defender.health <= 0 or self.defender.flag):\n self.defender = None\n\n # First process bots that are done with their orders...\n for bot in commander.game.bots_available:\n\n\n\n if ( self.defender == None or bot.flag) and len(commander.game.bots_alive) > 1:\n self.defender = bot\n targetMin = our_flag - Vector2(2.0, 2.0)\n targetMax = our_flag + Vector2(2.0, 2.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n if position:\n their_flag = commander.game.enemyTeam.flag.position\n their_base = commander.level.botSpawnAreas[commander.game.enemyTeam.name][0]\n their_score = commander.game.enemyTeam.flagScoreLocation\n commander.issue(orders.Defend, self.defender, [(p-bot.position, t) for p, t in [(our_flag, 5.0), (their_flag, 2.5), (their_base, 2.5), (their_score, 2.5)]], description = 'defending by scanning')\n\n # If we captured the flag\n if commander.game.enemyTeam.flag.carrier != None:\n # Return the flag home relatively quickly!\n targetMin = targetLocation - Vector2(2.0, 2.0)\n targetMax = targetLocation + Vector2(2.0, 2.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n commander.issue(orders.Charge, bot, position, description = 'returning enemy flag!')\n # In this case, the flag has not been captured yet\n else:\n path = [commander.game.enemyTeam.flag.position]\n\n if random.choice([True, False]):\n targetPosition = commander.game.team.flag.position\n targetMin = targetPosition - Vector2(8.0, 8.0)\n targetMax = targetPosition + Vector2(8.0, 8.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n if position and (targetPosition - position).length() > 3.0:\n commander.issue(orders.Charge, bot, position, description = 'defending the flag')\n else:\n commander.issue(orders.Charge, bot, path, description = 'attacking enemy flag')\n\n\n # Process the bots that are waiting for orders, bots are in a holding attack pattern.\n holding = len(commander.game.bots_holding)\n for bot in commander.game.bots_holding:\n if holding > 3:\n commander.issue(orders.Charge, bot, random.choice([b.position for b in bot.visibleEnemies]))", "def followUpDefend(self,targets,discardPile): # need to add defending with same ranking card\n \"\"\"Accept targets as a list\"\"\"\n \"\"\"Return list - discardCards (if 0 means defender accepts all the cards)\"\"\"\n if len(self.currentHand) < len(targets): #Goes against the rules of the game\n Error(\"Incorrect 
amount of targets\")\n discardCards = discardPile\n forfeit = False\n if self.AI:\n Error(\"AI not yet implemented for defending\")\n else:\n print(\"Cards that are currently attacking P\" + str(self.playerid) + \":\")\n cardManager.printNon(targets)\n print(\"Cards in P\" + str(self.playerid) + \" hand to defend with:\")\n cardManager.printHand(self.currentHand)\n for attackCard in targets: # iterate thru all attackers\n validDefend = False\n defendCard = 0\n while validDefend == False and forfeit == False:\n print(\"which card do you want to defend with from:\" , end=\" \")\n cardManager.printNon([attackCard])\n defendCard = int(input())\n while defendCard not in self.currentHand: # input checking\n defendCard = int(input(\"which card do you want to defend with?\"))\n # check if defenderCard is larger/ choose new card or give up\n validDefend = cardManager.compare(defendCard,attackCard)\n if validDefend == False:\n print(\"Failed defense...\")\n prompt = input(\"Do you wish to give up defense? (y/n)\")\n while prompt != \"y\" and prompt != 'n': # input checking\n prompt = input(\"Do you wish to give up defense? (y/n)\")\n if prompt == 'y':\n forfeit = True\n break\n else:\n print(\"valid defend!\")\n self.currentHand.remove(defendCard)\n discardCards.append(defendCard)\n discardCards.append(attackCard)\n if forfeit:\n break\n #results handling:\n if forfeit:\n for card in discardCards:\n self.currentHand.append(card)\n for card in targets:\n if card not in self.currentHand:\n self.currentHand.append(card)\n discardCards.clear()\n return discardCards", "def tick(self):\n\n #Simplified commands\n captured = self.captured()\n commander = self.commander\n targetLocation = commander.game.team.flagScoreLocation\n enemyFlagLocation = commander.game.enemyTeam.flag.position\n our_flag = commander.game.team.flag.position\n\t\t\n\t\t#when we have no defender or dead, we need one when free\n if self.defender and (self.defender.health <= 0):\n self.defender = None\n\n # First process bots that are done with their orders or they don't have any order yet\n for bot in commander.game.bots_available:\n if self.defender == None and self.commander.game.enemyTeam.flag.carrier != bot:\n self.defender = bot\n\n targetMin = our_flag - Vector2(2.0, 2.0)\n targetMax = our_flag + Vector2(2.0, 2.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n if (our_flag-bot.position).length() > 2:\n commander.issue(orders.Charge, self.defender, position, description = 'run to the flag')\n else:\n commander.issue(orders.Attack, bot, position, description = 'defend around flag')\n else:\n if self.defender == bot:\n self.defender = None\n if captured:\n # Return the flag home\n targetMin = targetLocation - Vector2(4.0, 4.0)\n targetMax = targetLocation + Vector2(4.0, 4.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n commander.issue(orders.Charge, bot, position, description = 'return enemy flag!')\n else:\n # Find the enemy team's flag position and run to that.\n if random.choice([True, False]):\n commander.issue(orders.Attack, bot, enemyFlagLocation, description = 'Attack flag!')\n else:\n commander.issue(orders.Charge, bot, enemyFlagLocation, description = 'Charge flag!')\n\n for bot in commander.game.bots_holding:\n if captured:\n targetLocation = commander.game.team.flagScoreLocation\n commander.issue(orders.Charge, bot, targetLocation , description = 'return enemy flag!')\n else:\n commander.issue(orders.Charge, bot, enemyFlagLocation, description = 
'Charge flag!')", "def on_attack(self, target, friendly):\n # Get buff from Dread Admiral Eliza\n if self.race == 'pirate' or self.race == 'all':\n eliza_buff_atk, eliza_buff_dfs = friendly.friendly_eliza_buff\n for each in friendly.minions:\n each.get_buff(eliza_buff_atk, eliza_buff_dfs)\n\n # If divine shield, not getting hurt\n if not self.divine_shield:\n self.hurt = True\n if not target.divine_shield:\n target.hurt = True", "def begin_defuse(game_event):\n # Get the defuser\n player = PlayerEntity(index_from_userid(game_event.get_int('userid')))\n\n # Get the bomb's instance\n bomb = get_bomb_entity()\n\n # Get whether the defuser has time to defuse\n gonna_blow = bomb.defuse_length > bomb.timer_length\n\n # Is the defuser a bot?\n if player.is_fake_client():\n\n # Get the bot convar's value\n bot_setting = bot_choose_wire.get_int()\n\n # Should the bot cut a wire?\n if (bot_setting == 1 and gonna_blow) or bot_setting == 2:\n\n # Cut a wire\n cut_chosen_wire(choice(_colors), player)\n\n # No need to go further\n return\n\n # Get the send menu convar's value\n send_setting = send_menu.get_int()\n\n # Get whether the defuser has a kit\n has_kit = game_event.get_bool('haskit')\n\n # Should the wire cut menu be sent to the defuser?\n if (send_setting == 1 or (send_setting == 2 and gonna_blow) or\n (send_setting == 3 and (gonna_blow or not has_kit))):\n\n # Send the wire cut menu to the defuser\n wire_menu.send(player.index)", "def normal_defense(self):\n if self.game.get_my_mana() > DEFENSE_MANA_CAP:\n self.portals.dumb_castle_defense(DEFENSE_MANA_CAP)\n self.portals.dumb_portal_defense(PORTAL_SELF_DEFENSE_MANA_CAP)", "def event_m20_11_x73(z52=_):\n \"\"\"State 0,1: Defeat determination\"\"\"\n IsChrDead(0, z52)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "def action_normal(self):\n obs = self.observation\n shoot = False\n eb = self.__class__.enemy_base\n \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n # Walk to ammo\n if obs.ammo < SUFFICIENT_AMMO:\n self.goal = self.getClosestLocation(ammopacks)\n self.motivation = MOTIVATION_AMMO\n self.debugMsg(\"*> Recharge (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''if (obs.ammo > 0 and obs.foes):\n self.goal = self.getClosestLocation(obs.foes)\n self.debugMsg(\"*> Go to enemy (%d,%d)\" % self.goal)\n # If the enemy is within range, shoot.\n if(point_dist(self.goal, obs.loc) < self.settings.max_range\n and not line_intersects_grid(obs.loc, self.goal, self.grid, self.settings.tilesize)):\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #if self.goal not in obs.friends:\n self.motivation = MOTIVATION_SHOOT_TARGET\n shoot = True'''\n \n # Attack strategy 1\n #########################\n # 1) Shoot live enemies #\n #########################\n # Aim at the closest enemy outside the enemy base\n if obs.ammo > 0 and obs.foes:\n living = filter(lambda x: point_dist(x[0:2], eb) > ENEMY_BASE_RANGE, obs.foes)\n self.debugMsg(\"Living: %s\" % (living,))\n if living:\n self.debugMsg(1)\n self.goal = min(living, key=lambda x: point_dist(obs.loc, x[0:2]))[0:2]\n self.motivation = MOTIVATION_SHOOT_TARGET\n self.debugMsg(2)\n # Check if enemy in fire range\n if (\n point_dist(self.goal, obs.loc) < self.settings.max_range and\n not line_intersects_grid(\n obs.loc, \n self.goal, \n self.grid, \n self.settings.tilesize\n )\n 
):\n self.debugMsg(3)\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #return self.getActionTriple(True,None,0) ###?? SHOULD WE STOP MOVING WHEN WE SHOOT?\n return self.getActionTriple(True)\n else:\n self.debugMsg(4)\n return self.getActionTriple()\n self.debugMsg(5)\n \n # Walk to an enemy CP\n if self.goal is None and len(self.friendlyCPs) < 2:\n self.goal = self.getClosestLocation(self.getQuietEnemyCPs())\n if self.goal:\n self.debugMsg(\"Crowded location: %d\" % self.getCrowdedValue(self.goal))\n self.motivation = MOTIVATION_CAPTURE_CP\n self.debugMsg(\"*> Capture (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''# If you can't think of anything to do\n # at least walk to a friendly control point\n if self.goal is None:\n self.goal = self.getClosestLocation(self.getQuietRestlessFriendlyCPs())\n if self.goal:\n self.motivation = MOTIVATION_GUARD_CP\n self.debugMsg(\"*> Guard (%d,%d)\" % (self.goal[0],self.goal[1]))'''\n \n if self.goal is None:\n self.goal = max(\n self.__class__.ammoSpots,\n key=lambda x: point_dist(x, obs.loc),\n )\n self.debugMsg(\"Going to ammospot far away (%d, %d)\" % (self.goal[0],self.goal[1]))\n self.motivation = MOTIVATION_STAY_PUT\n \n\n if self.goal:\n return self.getActionTriple(shoot)\n else:\n return self.getActionTriple(shoot)", "def event11512150():\n header(11512150, 1)\n ally, = define_args('i')\n if_event_flag_on(1, EVENT.DarkAnorLondo)\n if_entity_attacked_by(1, ally, CHR.Player)\n if_condition_true(0, 1)\n wait(1.0) # You have to attack them twice.\n if_entity_attacked_by(0, ally, CHR.Player)\n chr.set_team_type(ally, TeamType.hostile_ally)", "def update(self, g):\n \n self.game = g\n \n #if the player is dead, KILL THEM\n if self.hp[0] <= 0 and self.dead == False:\n self.dead = True\n self.deadt = 0\n #clear debuffs\n\n if self.dead == True:\n self.deadt += g.deltaT / 1000.0\n if self.deadt > self.reviveTime: #recussitate after 30 seconds\n self.dead = False\n self.hp[0] = self.hp[1]\n return #if dead, ignore input and all other updates\n \n elif self.dead == False:\n self.hp[0] += self.regen * g.deltaT / 1000.0\n if self.hp[0] > self.hp[1]:\n self.hp[0] = self.hp[1]\n self.mana[0] += self.manaRegen * g.deltaT / 1000.0\n if self.mana[0] > self.mana[1]:\n self.mana[0] = self.mana[1]\n self.attackTimer += self.attackSpeedMultiplier * g.deltaT / 1000.0\n #check debuffs\n self.checkBurning()\n self.checkChilled()\n self.checkShocked()\n self.checkParalyzed()\n \n \n #AURA\n for skill in self.skill:\n if skill.skillKey == 0 and skill.active == True: #aura is on\n #take mana\n self.mana[0] -= float(skill.skillCost) * g.deltaT / 1000.0\n #damage all creeps in AoE\n r = 4 * 24 #the radius of the AoE, in pixels at zoom = 1.\n for creep in g.creeps:\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < r:\n creep.take_damage( self.attack * 0.1 * g.deltaT / 1000.0, 2 ) #THIS SHOULD IGNORE ABSORBTION\n #apply debuffs, based on type\n if skill.skillAttr == 0: #fire\n creep.applyBurning()\n elif skill.skillAttr == 1: #frost\n creep.applyChilled()\n elif skill.skillAttr == 2: #lightning\n creep.applyShocked()\n \n #buff all players in AoE\n\n #AI\n if self.active == False and self.attackTimer >= self.attackDelay:\n self.do_ai()\n \n #collision detection\n self.collision = [False, False]\n #Needs to be floats to ensure the player doesn't get stuck in a wall (rounding errors cause this)\n self.futurex = self.x + self.speed * self.direction[0] * g.deltaT / 1000.0\n self.futurey = self.y + self.speed * 
self.direction[1] * g.deltaT / 1000.0\n \n #can't move outside the bounds of game area\n if self.futurex < 0 or self.futurex + self.rect.width > g.mapSize[0] * 24:\n #cannot move in x\n self.collision[0] = True\n if self.futurey < 0 or self.futurey + self.rect.height > g.mapSize[1] * 24:\n #cannot move in y\n self.collision[1] = True\n \n #tile collision\n for x in range( int(self.x / 24) - 1, int(self.x / 24) + 2):\n for y in range( int( (self.y + 8) / 24) - 1, int( (self.y + 8) / 24) + 2):\n if x > -1 and x < g.mapSize[0] and y > -1 and y < g.mapSize[1]:\n if g.tiles[x][y].blocking == True:\n #test if you would be in them (24 x 24 area, cut off head top)\n if self.futurex >= x * 24 and self.futurex <= x * 24 + 24 or \\\n self.futurex + 24 >= x * 24 and self.futurex + 24 <= x * 24 + 24:\n if self.futurey + 8 >= y * 24 and self.futurey + 8 <= y * 24 + 24 or \\\n self.futurey + 24 + 8 >= y * 24 and self.futurey + 24 + 8 <= y * 24 + 24:\n self.collision[0] = True\n self.collision[1] = True\n \n \n #move (or don't)\n if self.collision[0] == False:\n self.x += self.speed * self.direction[0] * g.deltaT / 1000.0\n self.rect.move_ip( (int)(self.x - self.rect.x), 0)\n if self.collision[1] == False:\n self.y += self.speed * self.direction[1] * g.deltaT / 1000.0\n self.rect.move_ip( 0, (int)(self.y - self.rect.y) )\n \n #parse direction\n if self.direction[0] == 1:\n self.frameDirection = 1\n elif self.direction[0] == -1:\n self.frameDirection = 3\n if self.direction[1] == 1:\n self.frameDirection = 0\n elif self.direction[1] == -1:\n self.frameDirection = 2\n \n #animate\n if self.direction != [0, 0]: #player is moving\n self.frameTimer += g.deltaT\n if self.frameTimer > self.frameDelay:\n self.frameTimer = 0\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n else: #player is idle\n self.frame = 0", "def heal(self):\n self.infected = False", "def event11512200():\n header(11512200, 1)\n\n skip_if_event_flag_on(7, EVENT.AnorLondoGwynWarp)\n chr.disable(CHR.Gwyn)\n chr.disable(CHR.GiantCrow)\n chr.disable(BlackKnights[0])\n chr.disable(BlackKnights[1])\n chr.disable(BlackKnights[2])\n chr.disable(BlackKnights[3])\n end()\n\n for boss_id in (CHR.Ornstein, CHR.SuperOrnstein, CHR.Smough, CHR.SuperSmough):\n chr.disable(boss_id)\n for knight in BlackKnights:\n chr.enable_immortality(knight)\n chr.disable_health_bar(knight)\n chr.disable(knight)\n\n anim.force_animation(CHR.Player, ANIM.SummonFadeIn)\n\n chr.enable_invincibility(CHR.GiantCrow)\n chr.disable_gravity(CHR.GiantCrow)\n chr.disable_collision(CHR.GiantCrow)\n chr.set_special_effect(CHR.Gwyn, 620) # add light to Gwyn\n chr.set_special_effect(CHR.Gwyn, 3170) # add lightning to Gwyn's weapon\n\n chr.disable_ai(CHR.Gwyn)\n anim.force_animation(CHR.Gwyn, 200, loop=True)\n wait(2.0)\n anim.force_animation(CHR.Gwyn, 200)\n chr.enable_ai(CHR.Gwyn)\n sound.enable_map_sound(1513805)\n boss.enable_boss_health_bar(CHR.Gwyn, 5375)\n flag.disable(EVENT.AnorLondoGwynWarp)\n\n flag.enable(11515360)\n\n if DEBUG.FAST_GWYN_KNIGHTS:\n wait(10.0)\n else:\n wait(135.0) # Time it takes for Soul of Cinder music to get to piano part.\n\n chr.ai_instruction(CHR.Gwyn, 1, 0)\n anim.force_animation(CHR.Gwyn, 3030)\n wait(2.1)\n if __REMASTERED:\n light.set_area_texture_parambank_slot_index(15, 2)\n else:\n light.set_area_texture_parambank_slot_index(15, 1)\n wait(3.0)\n chr.rotate_to_face_entity(CHR.Gwyn, CHR.Player)\n\n end_if_event_flag_on(11512201) # Gwyn already dead, no Black Knights.\n\n flag.enable(BlackKnightTurnFlags[0]) # Sword spawns first.\n 
run_event(11512202) # Black Knight spawn manager\n for slot, (knight, knight_active_flag) in enumerate(zip(BlackKnights, BlackKnightActiveFlags)):\n run_event_with_slot(11512210, slot, args=(knight, knight_active_flag)) # Death triggers", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def event11512060():\n header(11512060, 1)\n chr.disable(CHR.CapriciousThrall)\n end_if_this_event_on()\n end_if_event_flag_on(EVENT.CapriciousThrallDead)\n\n if_event_flag_on(0, EVENT.CapriciousThrallActive)\n chr.disable(CHR.SilverKnightArcherNearThrall)\n\n if_event_flag_on(1, EVENT.CapriciousThrallActive)\n if_host(1)\n if_player_inside_region(1, REGION.CapriciousThrallTrigger)\n if_condition_true(0, 1)\n\n # Ambush.\n flag.enable(EVENT.ThrallAmbushOngoing) # Ambush is ongoing. Note this MUST be enabled before the flag below.\n flag.enable(11512060) # One-off ambush is done.\n flag.enable(11502003) # Thrall won't appear in Sen's.\n flag.enable(11502004) # Thrall won't appear in Sen's.\n obj.enable(1511974)\n sfx.create_map_sfx(1511975)\n obj.enable(1511976)\n sfx.create_map_sfx(1511977)\n obj.enable(1511978)\n sfx.create_map_sfx(1511979)\n chr.enable(CHR.CapriciousThrall)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallAmbushAttack)\n wait(0.5)\n sound.enable_map_sound(1513804)\n boss.enable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n wait(100.0) # Battle timer.\n end_if_event_flag_on(11512061) # Already dead and handled.\n boss.disable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n sound.play_sound_effect(CHR.CapriciousThrall, SoundType.s_sfx, 777777777) # For effect.\n wait(3.0) # so sound effect can build up and slightly mask the abrupt music stop\n sound.disable_map_sound(1513804)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallRetreat)\n wait(1.4)\n chr.disable(CHR.CapriciousThrall)\n obj.disable(1511974)\n sfx.delete_map_sfx(1511975)\n obj.disable(1511976)\n sfx.delete_map_sfx(1511977)\n obj.disable(1511978)\n sfx.delete_map_sfx(1511979)\n message.status_explanation(TEXT.ThrallHasFled)\n flag.enable(11512008) # Message won't appear when you come back.", "def teleopPeriodic(self):\n\n try:\n if self.debounce(6, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n \n try:\n if self.debounce(2) or self.debounce(1, gamepad=True):\n self.boulder_automation.toggle_intake_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(7):\n self.chassis.toggle_field_oriented()\n except:\n self.onException()\n\n try:\n if self.debounce(8):\n enabled = self.heading_hold_pid.isEnable()\n self.heading_hold_pid.disable()\n self.bno055.resetHeading()\n self.heading_hold_pid.setSetpoint(constrain_angle(self.bno055.getAngle()))\n self.heading_hold_pid.reset()\n if enabled:\n self.heading_hold_pid.enable()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10):\n self.chassis.toggle_range_holding(self.chassis.correct_range)\n except:\n self.onException()\n\n try:\n if self.debounce(1) or self.debounce(8, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(9):\n self.chassis.toggle_heading_hold()\n except:\n self.onException()\n\n try:\n if self.debounce(4):\n self.defeater.up()\n except:\n self.onException()\n\n try:\n if self.debounce(5):\n self.shooter.stop()\n self.intake.stop()\n except:\n 
self.onException()\n\n try:\n if self.debounce(3):\n #self.chassis.range_setpoint = self.chassis.correct_range\n #self.chassis.distance_pid.enable()\n # self.shooter.start_shoot()\n self.chassis.range_setpoint = 0.0\n self.chassis.track_vision = False\n self.chassis.toggle_range_holding()\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\n\n try:\n if self.debounce(6):\n self.defeater.down()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.shooter.backdrive()\n self.intake.backdrive()\n except:\n self.onException()\"\"\"\n\n try:\n if self.joystick.getPOV() != -1:\n self.chassis.heading_hold = True\n direction = 0.0\n if self.joystick.getPOV() == 0:\n # shooter centre goal\n direction = math.pi\n elif self.joystick.getPOV() == 90:\n # shooter right goal\n direction = math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 270:\n # shooter left goal\n direction = -math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 180:\n direction = 0.0\n self.chassis.set_heading_setpoint(direction)\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(11) or self.gamepad.getRawButton(2):\n self.chassis.field_oriented = False \n else:\n self.chassis.field_oriented = True\n except:\n self.onException()\n\n try:\n if self.gamepad.getRawButton(3):\n self.boulder_automation.engage(\"backdrive_manual\")\n elif self.boulder_automation.current_state == \"backdrive_manual\":\n self.boulder_automation.done()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(1, gamepad=True):\n self.chassis.zero_encoders()\n self.chassis.distance_pid.setSetpoint(1.2)\n self.chassis.distance_pid.enable()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10, gamepad=True):\n self.vision.write_image()\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(12):\n self.joystick_rate = 0.6\n else:\n self.joystick_rate = 0.4\n except:\n self.onException()\n\n self.chassis.inputs = [-rescale_js(self.joystick.getY(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getX(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getZ(), deadzone=0.2, exponential=15.0, rate=self.joystick_rate),\n (self.joystick.getThrottle() - 1.0) / -2.0\n ]\n for input in self.chassis.inputs[0:3]:\n if input != 0.0:\n # Break out of auto if we move the stick\n self.chassis.distance_pid.disable()\n self.chassis.range_setpoint = None\n self.chassis.track_vision = False\n # self.chassis.field_oriented = True\n self.putData()", "async def fight(self, ctx):\r\n attacker = ctx.message.author.name\r\n defenders = ctx.message.mentions\r\n # only continue if valid attacker and defender\r\n attacker_ship = Ship.find_ship(attacker)\r\n if not attacker_ship:\r\n await ctx.send('{0}, you do not have a ship! `$ship` to get one'.format(ctx.message.author.mention))\r\n return\r\n if not defenders:\r\n await ctx.send('Who are you fighting? `$fight @user` to fight someone')\r\n # reset cooldowns when not successful fights\r\n # self.fight.reset_cooldown()\r\n return\r\n elif len(defenders) > 1:\r\n await ctx.send('Who are you fighting? One at a time (for now)')\r\n return\r\n else:\r\n defender = defenders[0].name\r\n\r\n if attacker == defender:\r\n attacker_ship.gold -= 50\r\n if attacker_ship.gold < 0:\r\n attacker_ship.gold = 0\r\n attacker_ship.update()\r\n await ctx.send('A mutiny has started on {0}\\'s ship! The treasure hold has been ransacked! 
'\r\n '{1} gold was taken.'.format(defender, 50))\r\n return\r\n\r\n defender_ship = Ship.find_ship(defender)\r\n if not defender_ship:\r\n await ctx.send('{0} does not have a ship! There are no fights'\r\n ' on the high sea if there are no ships to fight'.format(defender))\r\n return\r\n\r\n # actually start fight\r\n em = discord.Embed(title='{0} has attacked {1} :rage: '.format(attacker, defender), colour=0xDDDD00)\r\n\r\n # calculate who wins based on their attack and defense plus random number\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n attacker_msg = ''\r\n defender_msg = ''\r\n\r\n while attacker_ship.hull > 0 and defender_ship.hull > 0:\r\n attack = random.randint(1, 100)\r\n attack += attacker_ship.cannons + attacker_ship.crew\r\n\r\n defense = random.randint(1, 100)\r\n defense += defender_ship.cannons + defender_ship.crew\r\n\r\n defender_ship.damage_hull(attack)\r\n attacker_ship.damage_hull(defense)\r\n\r\n attacker_msg += 'Fired a volley of **{}** cannonballs <a:cannon:554558216889958400> \\n'.format(attack)\r\n defender_msg += '<a:cannon_reversed:554722119905181735> Return fired a volley of **{}** cannonballs \\n'.format(defense)\r\n\r\n\r\n\r\n if attacker_ship.hull > defender_ship.hull: # attacker wins\r\n # base gold at 100, more gold earned for harder fights, less or easier ones\r\n gold = 100 + (defender_ship.level() - attacker_ship.level()) * 2\r\n gold = gold if gold > 0 else 0\r\n attacker_ship.gold += gold\r\n attacker_ship.win += 1\r\n defender_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n\r\n em.add_field(name='{} is the winner! :crossed_swords:'.format(attacker),\r\n value='<a:treasure_chest:554730061463289857> They earned **{}** gold for their coffers.'.format(gold), inline=False)\r\n\r\n else: # defender wins\r\n defender_ship.win += 1\r\n attacker_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n em.add_field(name='{} is the winner! 
:shield:'.format(defender),\r\n value=' <a:armor:554559559545520128> Their ship survives to fight another day.', inline=False)\r\n\r\n await ctx.send(embed=em)", "def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()", "def test_buff_expiration(self):\n sess = self.sess\n battle = self.battle\n\n # For the buff to work, alice needs to own this region\n battle.region.owner = self.alice.team\n sess.commit()\n\n s1 = battle.create_skirmish(self.alice, 30) # Attack 30 infantry\n s1.react(self.bob, 30) # -- oppose 30 infantry\n s1.react(self.dave, 4) # -- oppose 4 infantry\n result = s1.resolve()\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 4)\n self.assertEqual(result.vp, 30)\n\n s2 = battle.create_skirmish(self.bob, 29) # Attack with 29 infantry\n s2.react(self.alice, 29) # -- oppose with 29 infantry\n s2.react(self.carol, 2) # -- oppose with 2\n result = s2.resolve()\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 29)\n\n # Bob's winning this, but wait! A buff that expires immediately!\n buff = db.Buff.otd(expiration=-30)\n battle.region.buff_with(buff)\n\n # One buff should exist in DB\n self.assertEqual(sess.query(db.Buff).count(), 1)\n db.Buff.update_all(sess)\n # Now it should be gone due to expiration\n self.assertEqual(sess.query(db.Buff).count(), 0)\n\n self.end_battle()\n\n # Bob wins because buff expired, 30 to 29\n self.assertEqual(self.battle.victor, self.bob.team)\n # score1 is the score for team 1\n self.assertEqual(self.battle.score1, 30)\n # score0 should not include the buff\n self.assertEqual(self.battle.score0, 29)", "def event11510130():\n header(11510130, 1)\n\n skip_if_event_flag_off(3, EVENT.AnorLondoGwynWarp)\n chr.disable(CHR.DarkwraithInBossRoom)\n chr.disable(CHR.SilverKnightArcherNearBossFog)\n end()\n\n # Changes to make when Dark Anor Londo begins\n skip_if_event_flag_on(10 + 2 * len(Darkwraiths), EVENT.DarkAnorLondo)\n chr.disable(6640)\n chr.disable(6650)\n chr.disable(CHR.ChapelMimic)\n chr.disable(CHR.AbyssalPrinceJareel)\n for darkwraith in Darkwraiths:\n chr.disable(darkwraith)\n if_event_flag_on(0, EVENT.DarkAnorLondo)\n chr.enable(6640)\n chr.enable(6650)\n chr.enable(CHR.ChapelMimic)\n chr.enable(CHR.AbyssalPrinceJareel)\n chr.disable_ai(CHR.AbyssalPrinceJareel) # maybe redundant\n for darkwraith in Darkwraiths:\n chr.enable(darkwraith)\n\n # Skips to here if Dark Anor Londo has already started.\n # Disable chapel chest (replaced by Mimic).\n obj.disable(OBJ.ChapelChest)\n obj.disable_activation(OBJ.ChapelChest, -1)\n for enemy_id in DarkAnorLondoAllies:\n chr.set_team_type(enemy_id, TeamType.fighting_ally)\n # Move Palace archer.\n warp.short_warp(1510301, 'region', REGION.MoveArcherInDarkPalace, -1)\n for enemy_id in DarkAnorLondoDisabled:\n chr.disable(enemy_id)\n for painting_guardian_id in range(1510150, 1510159):\n # Disable Painting Guardians on the floor (except one getting killed).\n chr.disable(painting_guardian_id)\n skip_if_event_flag_on(1, 11510861) # Skip if Darkmoon Guardian is already dead.\n warp.warp(CHR.DarkmoonGuardian, 'region', 1512451, -1)\n end_if_event_flag_on(1034) # Stop here if Darkmoon Knightess is already dead.\n warp.warp(CHR.DarkmoonKnightess, 'region', 1512450, -1)\n chr.set_nest(CHR.DarkmoonKnightess, 1512450)\n chr.set_standby_animation_settings_to_default(CHR.DarkmoonKnightess)", "def defend(self,targets): # need to add 
defending with same ranking card\n \"\"\"Accept targets as a list\"\"\"\n \"\"\"Return list - discardCards (if 0 means defender accepts all the cards)\"\"\"\n if len(self.currentHand) < len(targets): #Goes against the rules of the game\n Error(\"Incorrect amount of targets\")\n discardCards = []\n forfeit = False\n if self.AI:\n Error(\"AI not yet implemented for defending\")\n else:\n print(\"Cards that are currently attacking P\" + str(self.playerid) + \":\")\n cardManager.printNon(targets)\n print(\"Cards in P\" + str(self.playerid) + \" hand to defend with:\")\n cardManager.printHand(self.currentHand)\n for attackCard in targets: # iterate thru all attackers\n validDefend = False\n defendCard = 0\n while validDefend == False and forfeit == False:\n print(\"which card do you want to defend with from:\" , end=\" \")\n cardManager.printNon([attackCard])\n defendCard = int(input())\n while defendCard not in self.currentHand: # input checking\n defendCard = int(input(\"which card do you want to defend with?\"))\n # check if defenderCard is larger/ choose new card or give up\n validDefend = cardManager.compare(defendCard,attackCard)\n if validDefend == 'evaded':\n print(\"Perfect block\")\n self.currentHand.remove(defendCard)\n return (['evaded',defendCard] + targets)\n if validDefend == False:\n print(\"Failed defense...\")\n prompt = input(\"Do you wish to give up defense? (y/n)\")\n while prompt != \"y\" and prompt != 'n': # input checking\n prompt = input(\"Do you wish to give up defense? (y/n)\")\n if prompt == 'y':\n forfeit = True\n break\n else:\n print(\"valid defend!\")\n self.currentHand.remove(defendCard)\n discardCards.append(defendCard)\n discardCards.append(attackCard)\n if forfeit:\n break\n #results handling:\n if forfeit:\n for card in discardCards:\n self.currentHand.append(card)\n for card in targets:\n if card not in self.currentHand:\n self.currentHand.append(card)\n discardCards.clear()\n return discardCards", "async def attacking_logic(self):\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))", "def event1950():\n header(1950)\n boss_dead_flag, immediate_item, delayed_item_1, delayed_item_2 = define_args('iiii')\n end_if_event_flag_on(boss_dead_flag)\n if_event_flag_on(0, boss_dead_flag)\n skip_if_equal(1, immediate_item, 0)\n item.award_item_to_host_only(immediate_item)\n network.disable_sync()\n wait(5.0)\n skip_if_equal(1, delayed_item_1, 0)\n item.award_item_to_host_only(delayed_item_1)\n skip_if_equal(1, delayed_item_2, 0)\n item.award_item_to_host_only(delayed_item_2)", "def initialize(self):\n self.verbose = True # display the command descriptions next to the bot labels\n self.carrier = None\n self.interceptors = []\n self.assassins = dict()\n self.defenders = []\n self.camper = None\n self.attackers = []\n self.spawnCampers = []\n self.aliveEnemies = 0\n self.lastEventIndex = -1\n \n \n\n # Calculate flag positions and store the middle.\n self.ours = self.game.team.flag.position\n self.theirs = self.game.enemyTeam.flag.position\n self.middle = (self.theirs + self.ours) / 2.0\n\n # Now figure out the flanking directions, assumed perpendicular.\n d = (self.ours - self.theirs)\n self.left = Vector2(-d.y, d.x).normalized()\n self.right = Vector2(d.y, -d.x).normalized()\n self.front = Vector2(d.x, d.y).normalized()\n self.defendAngle = self.level.fieldOfViewAngles[BotInfo.STATE_DEFENDING]\n self.midEnemySpawn = 
self.game.enemyTeam.botSpawnArea[0].midPoint(self.game.enemyTeam.botSpawnArea[1])\n \n \"\"\"circle = 2 * math.pi\n outerVec = self.game.enemyTeam.botSpawnArea[0] - self.game.enemyTeam.flagSpawnLocation\n while circle > 0:\n self.defenders += [[None, self.angledVector(outerVec, self.defendAngle / 2)]]\n outerVec = self.angledVector(outerVec, self.defendAngle)\n circle -= self.defendAngle\n \n campPos = []\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[0].x - self.level.firingDistance, self.game.enemyTeam.botSpawnArea[0].y + 0.5 * (self.game.enemyTeam.botSpawnArea[1].y - self.game.enemyTeam.botSpawnArea[0].y)))\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[0].x + 0.5 * (self.game.enemyTeam.botSpawnArea[1].x - self.game.enemyTeam.botSpawnArea[0].x ), self.game.enemyTeam.botSpawnArea[1].y + self.level.firingDistance))\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[1].x + self.level.firingDistance, self.game.enemyTeam.botSpawnArea[0].y + 0.5 * (self.game.enemyTeam.botSpawnArea[1].y - self.game.enemyTeam.botSpawnArea[0].y)))\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[0].x + 0.5 * (self.game.enemyTeam.botSpawnArea[1].x - self.game.enemyTeam.botSpawnArea[0].x ), self.game.enemyTeam.botSpawnArea[0].y - self.level.firingDistance))\n\n for cp in campPos:\n free = self.level.findNearestFreePosition(cp)\n if free:\n sys.stdout.write(str(free) + '\\n')\n self.spawnCampers.append([None, free, False])\n \"\"\"\n sys.stdout.write(str(self.game.enemyTeam.botSpawnArea[1]) + ' ' + str(self.level.characterRadius) + '\\n')\n visited, islandEdges, islandOuter = [], [], []\n for x in range(0, len(self.level.blockHeights)):\n for y in range(0, len(self.level.blockHeights[x])):\n _, edges, island = self.recurseNeighbours(x, y, visited)\n if edges:\n islandEdges.append(edges)\n islandOuter.append(island)\n \n \n sys.stdout.write(str(islandEdges) + '\\n' + str(islandOuter) + '\\n')\n \n blocked = [item for sublist in islandOuter for item in sublist]\n #blockedOrSpawn = blocked[:]\n spawn = []\n for x in range(int(self.game.enemyTeam.botSpawnArea[0].x), int(self.game.enemyTeam.botSpawnArea[1].x)):\n for y in range(int(self.game.enemyTeam.botSpawnArea[0].y), int(self.game.enemyTeam.botSpawnArea[1].y)):\n spawn.append(Vector2(x, y))\n #blockedOrSpawn += spawn\n \n self.deadlines = dict()\n for i in range(len(islandEdges)):\n for coord, orientation in islandEdges[i]:\n if orientation is self.TOPLEFT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - self.level.firingDistance / 1.0283968, coord.y + 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.BOTTOMLEFT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - self.level.firingDistance / -1.0283968, coord.y - 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.LEFTUP:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + 0.24 * self.level.firingDistance / 1.0283968, coord.y - self.level.firingDistance / 1.0283968))\n elif orientation is self.RIGHTUP:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - 0.24 * self.level.firingDistance / 1.0283968, coord.y - self.level.firingDistance / 1.0283968))\n elif orientation is self.TOPRIGHT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + self.level.firingDistance / 1.0283968, coord.y + 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.BOTTOMRIGHT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + self.level.firingDistance / 
1.0283968, coord.y - 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.LEFTDOWN:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + 0.24 * self.level.firingDistance / 1.0283968, coord.y + self.level.firingDistance / 1.0283968))\n elif orientation is self.RIGHTDOWN:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - 0.24 * self.level.firingDistance / 1.0283968, coord.y + self.level.firingDistance / 1.0283968))\n \n sys.stdout.write(str(self.deadlines) + '\\n')\n pointsAndLinesByEdge = dict()\n try:\n self.recursePaths(self.midEnemySpawn, blocked, self.deadlines, [], pointsAndLinesByEdge)\n except RuntimeError as e:\n sys.stdout.write(str(e) + '\\n')\n camplines = set()\n for edge, pls in pointsAndLinesByEdge.iteritems():\n for _, contact in pls:\n camplines.add((self.level.findNearestFreePosition(edge), contact))\n sys.stdout.write('\\n' + str(camplines))\n \n for cl in camplines:\n self.spawnCampers.append([[], cl])", "def event706():\n header(706, 0)\n\n if_event_flag_on(-1, 710)\n if_event_flag_on(-1, EVENT.WarpAbilityAtSunChamber)\n if_condition_true(0, -1)\n\n flag.enable(706) # Enable warping.\n\n # WARPING IS ACTIVE WHILE PENDING HERE.\n\n if_event_flag_on(-1, 11705170) # Player in Archive Tower ...\n if_in_world_area(-1, 11, 0) # OR player in Painted World ...\n if_in_world_area(7, 15, 1) # OR (Player in Anor Londo AND Dark Anor Londo active AND Jareel not dead)\n if_event_flag_on(7, 11510400)\n if_event_flag_off(7, 11510901)\n if_condition_true(-1, 7)\n if_in_world_area(6, 14, 1) # OR (Player in Lost Izalith AND Jeremiah present)\n if_event_flag_on(-2, EVENT.JeremiahInRuins)\n if_event_flag_on(-2, EVENT.JeremiahInIzalith)\n if_event_flag_on(-2, EVENT.JeremiahImpatient)\n if_event_flag_on(-2, EVENT.JeremiahFleeingIzalith)\n if_condition_true(6, -2)\n if_condition_true(-1, 6)\n if_condition_true(0, -1)\n flag.disable(706)\n\n # WARPING IS NOT ACTIVE WHILE PENDING HERE.\n\n if_event_flag_off(1, 11705170) # Player not in Archive Tower ...\n if_not_in_world_area(1, 11, 0) # AND player not in Painted World ...\n if_not_in_world_area(-7, 15, 1) # AND (player not in AL OR not Dark Anor Londo OR Jareel dead)\n if_event_flag_off(-7, 11510400)\n if_event_flag_on(-7, 11510901)\n if_condition_true(1, -7)\n if_not_in_world_area(-6, 14, 1) # AND (player not in Izalith OR Jeremiah gone)\n if_event_flag_off(2, EVENT.JeremiahInRuins)\n if_event_flag_off(2, EVENT.JeremiahInIzalith)\n if_event_flag_off(2, EVENT.JeremiahImpatient)\n if_event_flag_off(2, EVENT.JeremiahFleeingIzalith)\n if_condition_true(-6, 2)\n if_condition_true(1, -6)\n if_condition_true(0, 1)\n restart()", "def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... 
\n return self.atk", "def event11512001():\n header(11512001)\n end_if_this_event_on()\n\n if_event_flag_on(1, EVENT.DarkOrnsteinKilledFirst) # Ornstein died first.\n if_entity_health_less_than_or_equal(1, CHR.DarkSmough, 0.0)\n if_condition_true(-1, 1)\n if_event_flag_on(2, EVENT.DarkSmoughKilledFirst) # Smough died first.\n if_entity_health_less_than_or_equal(2, CHR.DarkOrnsteinScion, 0.0)\n if_condition_true(-1, 2)\n if_condition_true(0, -1)\n\n chr.cancel_special_effect(CHR.DarkOrnsteinScion, 4950) # Make his death animation normal speed.\n\n item.award_item_to_host_only(ITEMLOT.DarkOrnsteinAndSmoughReward)\n skip_if_condition_false_finished(3, 2)\n item.award_item_to_host_only(ITEMLOT.DarkOrnsteinScionReward)\n boss.kill_boss(CHR.DarkOrnsteinScion)\n skip(1)\n boss.kill_boss(CHR.DarkSmough)\n sound.play_sound_effect(CHR.Player, SoundType.s_sfx, 777777777)\n boss.disable_boss_health_bar(CHR.DarkSmough, TEXT.SunEaterSmough)\n boss.disable_boss_health_bar(CHR.DarkOrnsteinGiant, TEXT.ForsakenKnightOrnstein)\n\n for fog_wall, fog_sfx in zip((1511990, 1511992, 1511988), (1511991, 1511993, 1511989)):\n obj.disable(fog_wall)\n sfx.delete_map_sfx(fog_sfx, True)\n\n flag.enable(EVENT.DarkOrnsteinAndSmoughDead)\n wait(3.0)\n sound.disable_map_sound(1513800)", "def battle_resting(self):\n pass" ]
[ "0.64206195", "0.6213481", "0.6145314", "0.6064711", "0.59478605", "0.5906304", "0.58735126", "0.5822274", "0.5806574", "0.58036184", "0.57708496", "0.5770829", "0.57505125", "0.57458353", "0.5717229", "0.57041544", "0.5693837", "0.5692482", "0.56834847", "0.56811273", "0.5658594", "0.5647957", "0.5643278", "0.5621468", "0.56166214", "0.56020695", "0.55939794", "0.55711097", "0.55467814", "0.55448073" ]
0.64651537
0
The `time_units` object should list singular/plural string tuples for each possible time unit choice.
def test_fields_effort_time_units_dictionary_string(self, _mock_check):
    field = EffortField(time_units={"minute": "minute"})

    errors = field.check()
    self.assertEqual(len(errors), 1)

    error = errors[0]
    self.assertEqual(error.msg, "'time_units' must be a dictionary of tuples.")
    self.assertEqual(error.obj, field)
    self.assertEqual(error.id, "fields.E1011")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_time_unit(self, variables):\n if len(self.TIME_VARIABLE):\n # times = self._get_variable(variables, self.TIME_VARIABLE)[:]\n units = variables['time'].units\n return units\n else:\n return \"\"", "def time_units(self) -> str:\n return self._ll_tree_sequence.get_time_units()", "def infer_time_unit(time_seconds_arr: Collection[float]) -> TimeUnit:\n if not time_seconds_arr:\n return \"hours\"\n max_time_seconds = max(time_seconds_arr)\n if max_time_seconds <= 60 * 2:\n return \"seconds\"\n elif max_time_seconds <= 60 * 60 * 2:\n return \"minutes\"\n elif max_time_seconds <= 24 * 60 * 60 * 2:\n return \"hours\"\n else:\n return \"days\"", "def humanize_time(amount, units=None):\n result = []\n\n if units is None:\n units = 'seconds'\n\n unit = map(lambda l: l[1], NAMES).index(units)\n # Convert to seconds\n amount *= INTERVALS[unit]\n\n for i in range(len(NAMES) - 1, -1, -1):\n a = amount // INTERVALS[i]\n if a > 0:\n if amount - a < 1:\n result.append((amount, NAMES[i][1]))\n else:\n result.append((int(a), NAMES[i][1 % int(a)]))\n amount -= a * INTERVALS[i]\n\n return result", "def scale_time_to(recs, unit):\n\n for r in recs:\n if unit == 'd':\n r.t = [t / 3600 / 24 for t in r.time]\n elif unit == 'hours':\n r.t = [t / 3600 for t in r.time]\n elif unit == 'min':\n r.t = [t / 60 for t in r.time]\n elif unit in ('s', 'sec'):\n r.t = r.time\n else:\n Exception('Wrong time unit')\n\n Records.time_unit = unit\n Records.time_label = 'Time (' + unit + ')'", "def convert_units(self, time_units=None, len_units=None):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert drawdown data\n self.drawdown.convert_units(time_units, len_units)\n # Convert associate data units\n for i in range(self.data_count()):\n if self.data[i].dtype == 1: # drawdown units\n data_units = len_units\n elif self.data[i].dtype == 2: # first derivative units\n data_units = len_units + \"/\" + time_units\n elif self.data[i].dtype == 3: # second derivative units\n data_units = len_units + \"/\" + time_units + \"2\"\n self.data[i].convert_units(time_units, data_units)\n self.len_units = len_units\n self.time_units = time_units\n # End Function", "def test_fields_effort_time_units_tuple(self, _mock_check):\n field = EffortField(time_units=((\"minute\", \"minute\"),))\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'time_units' must be a dictionary of tuples.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1011\")", "def scale_time_units(time_seconds_arr: Collection[float], unit: TimeUnit) -> Collection[float]:\n if unit == \"minutes\":\n factor = 60\n elif unit == \"hours\":\n factor = 60 * 60\n elif unit == \"days\":\n factor = 24 * 60 * 60\n else:\n factor = 1\n return [x / factor for x in time_seconds_arr]", "def set_hm_unit_display(self):\r\n units = str(self.entries['units'].combobox.currentText())\r\n self.ui.is_unitL1.setText(units)\r\n 
self.ui.is_unitL2.setText(units)\r\n self.ui.is_unitL3.setText(units)\r\n self.ui.is_unitL4.setText(units)", "def setTimeUnits(self, *args):\n return _libsbml.Model_setTimeUnits(self, *args)", "def test_fields_effort_time_units_dictionary_success(self, _mock_check):\n field = EffortField(time_units={\"minute\": (\"minute\", \"minutes\")})\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def convert_time(self, event):\n try:\n #Compare other unit to one unit(seconds)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"centuries\": 3153600000.0, \"days\": 86400.0, \"decades\": 315360000.0, \"femtoseconds\": 1e-15, \"fortnights\": 1209600.0, \"hours\": 3600.0, \"microseconds\": 1e-06, \"millenia\": 31536000000.0, \"milliseconds\": 0.001, \"minutes\": 60.0, \"months(Common)\": 2628000.0, \"months(Synodic)\": 2551442.8896, \"nanoseconds\": 1e-09, \"picoseconds\": 1e-12, \"quarters(Common)\": 7884000.0, \"seconds\": 1.0, \"shakes\": 1e-08, \"weeks\": 604800.0, \"years(Average Gregorian)\": 31556952.0, \"years(Common)\": 31536000.0, \"years(Julian)\": 31557600.0, \"years(Leap)\": 31622400.0, \"years(Tropical)\": 31556925.216}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def time_unit(self):\n self.skip_over_label['text'] = \"Start at (mins:secs): \"\n self.sampled_rate_label['text'] = \"Sample every (secs): \"\n self.total_frames_label['text'] = \"End at (mins:secs): \"", "def get_time_with_unit(time):\n sec_in_min = 60\n sec_in_hour = 60 * 60\n sec_in_day = 24 * 60 * 60\n\n if time % sec_in_day == 0:\n time = time / sec_in_day\n unit = 'days'\n\n elif time % sec_in_hour == 0:\n time = time / sec_in_hour\n unit = 'hours'\n\n else:\n time = time / sec_in_min\n unit = 'minutes'\n return \"%s %s\" % (time, unit)", "def convert_units(self, time_units=None, len_units=None, pump_units=None,\n same=False):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Check new pumping rate units\n in_pump = self.pump_units\n if pump_units is None:\n pump_units = in_pump\n if same:\n pump_units = \"%s3/%s\" % (len_units, time_units)\n flag = _units.validate_units(pump_units)\n if flag == -1:\n raise ValueError('Bad pumping rate units input {}'.format(len_units))\n\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert pumping rate data\n self.pumprate.convert_units(time_units, pump_units)\n # Convert well data units\n for i in range(self.well_count()):\n self.wells[i].convert_units(time_units, len_units)\n # Set input units\n self.len_units = len_units\n self.time_units = time_units\n 
self.pump_units = pump_units\n # End Function", "def format_timedelta(timedelta, in_seconds=False, largest_units=None, auto_singulars=False):\n\n units = TIME_UNITS[1:] if in_seconds else TIME_UNITS\n\n if largest_units is None:\n largest_units = units[-1][0]\n elif largest_units not in map(lambda x: x[0], units):\n raise ValueError('Invalid largest units specified')\n\n if timedelta == 0:\n return '0 ' + units[0][0]\n\n out_str = ''\n\n for name, diviser in units:\n if timedelta == 0:\n break\n\n if largest_units == name:\n diviser = None\n\n val = timedelta % diviser if diviser else timedelta\n if val != 0:\n out_str = str(val) + ' ' + (name[:-1] if auto_singulars and val == 1 else name) + ', ' + out_str\n\n if diviser is None:\n break\n\n timedelta //= diviser\n\n return out_str.strip(', ')", "def in_units_of(self, *units):\n units = list(map(_find_unit, units))\n if len(units) == 1:\n unit = units[0]\n value = _convert_value(self.value, self.unit, unit)\n return self.__class__(value, unit, self._space)\n else:\n units.sort()\n result = []\n value = self.value\n unit = self.unit\n for i in range(len(units) - 1, -1, -1):\n value = value * unit.conversion_factor_to(units[i])\n if i == 0:\n rounded = value\n else:\n rounded = _round(value)\n result.append(self.__class__(rounded, units[i]))\n value = value - rounded\n unit = units[i]\n return tuple(result)", "def test_combined_long_singular(self):\n expected = dict(\n seconds=1, minutes=2, hours=3, days=4, weeks=5, months=6, years=7)\n self.assertEqual(\n expected,\n util.parse_relative_time_string(\n \"+1second 2minute 3hour 4day 5week 6month 7year\"))", "def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x", "def formatTime(seconds, timeUnits):\n neg = False\n if seconds < 0: \n seconds = seconds * -1\n neg = True\n label = None\n if timeUnits == \"SECS_MILLIS\":\n label = \"%.2f\" % seconds\n elif timeUnits == \"SECS\":\n label = \"%d\" % int(round(seconds))\n elif timeUnits == \"MINS\":\n mins = float(seconds) / float(60)\n label = \"%d\" % int(round(mins))\n elif timeUnits == \"HOURS\":\n hrs = float(seconds) / float(3600)\n label = \"%d\" % int(round(hrs))\n elif timeUnits == \"MINS_SECS\":\n mins = seconds / 60\n secs = round(seconds % 60)\n label = \"%d:%02d\" % (mins, secs)\n elif timeUnits == \"HOURS_MINS\":\n hrs = seconds / 3600\n mins = round((seconds % 3600)/60)\n label = \"%d:%02d\" % (hrs, mins)\n elif timeUnits == \"HOURS_MINS_SECS\":\n hrs = seconds / 3600\n mins = (seconds % 3600)/60\n secs = round(seconds % (3600 * 60))\n label = \"%d:%02d:%02d\" % (hrs, mins, secs)\n elif timeUnits == \"HOURS_MINS_SECS_MILLIS\":\n hrs = seconds / 3600\n mins = (seconds % 3600)/60\n secs = (seconds % (3600 * 60))\n label = \"%d:%02d:%05.2f\" % (hrs, mins, secs)\n else:\n label = \"%.2f sec\" % seconds\n return neg and \"-%s\"%label or label", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def convert_unit(self, time_unit):\r\n\r\n self.time_unit = 
time_unit\r\n self._conversion_factor = time_unit_conversion[time_unit]", "def units(self, *args):\n u = self.parent.unit\n return tuple('%s%s' % (a, u) for a in args)", "def test_change_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n s.units = \"SI\"\n assert s.units == \"SI\"\n assert s.cv.units == \"kilojoule / kelvin / kilogram\"\n assert s.cp.units == \"kilojoule / kelvin / kilogram\"\n assert s.s.units == \"kilojoule / kelvin / kilogram\"\n assert s.h.units == \"kilojoule / kilogram\"\n assert s.T.units == \"degree_Celsius\"\n assert s.u.units == \"kilojoule / kilogram\"\n assert s.v.units == \"meter ** 3 / kilogram\"\n assert s.p.units == \"bar\"", "def setTimeUnits(self, *args):\n return _libsbml.Event_setTimeUnits(self, *args)", "def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)", "def duration_steps_readable(durations):\n duration_strings = list()\n for i, minutes in enumerate(durations):\n duration_strings.append(minutes_readable(minutes))\n return duration_strings", "def valid_unit() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.REJECTED,\n AssignmentState.SOFT_REJECTED,\n AssignmentState.EXPIRED,\n ]", "def _get_representation_component_units(args, kwargs):\n if \"unit\" not in kwargs:\n units = [None, None, None]\n\n else:\n units = kwargs.pop(\"unit\")\n\n if isinstance(units, str):\n units = [x.strip() for x in units.split(\",\")]\n # Allow for input like unit='deg' or unit='m'\n if len(units) == 1:\n units = [units[0], units[0], units[0]]\n elif isinstance(units, (Unit, IrreducibleUnit)):\n units = [units, units, units]\n\n try:\n units = [(Unit(x) if x else None) for x in units]\n units.extend(None for x in range(3 - len(units)))\n if len(units) > 3:\n raise ValueError()\n except Exception as err:\n raise ValueError(\n \"Unit keyword must have one to three unit values as \"\n \"tuple or comma-separated string.\"\n ) from err\n\n return units", "def fits_to_units(unit_str):\n unit_lookup = {\n 'meters': 'm',\n 'meter': 'm',\n 'degrees': 'deg',\n 'degree': 'deg',\n 'hz': 'Hz',\n 'hertz': 'Hz',\n 'second': 's',\n 'sec': 's',\n 'secs': 's',\n 'days': 'd',\n 'day': 'd',\n 'steradians': 'sr',\n 'steradian': 'sr',\n 'radians': 'rad',\n 'radian': 'rad',\n 'jy': 'Jy',\n 'au': 'AU',\n }\n\n try:\n new_units = \"\"\n\n if unit_str is None:\n unit_str = ''\n unit_str = unit_str.lower()\n unit_list = unit_str.split(\"/\")\n\n for uu in unit_list:\n if uu.endswith(\"s\") and len(uu) > 1:\n uu = uu[:-1]\n corrected_unit = unit_lookup.get(uu, uu)\n new_units += corrected_unit\n new_units += \" / \"\n new_units = new_units[:-3]\n unit = Unit(new_units)\n return unit\n\n except ValueError:\n warnings.warn(\"Unknown unit: %s\" % new_units, UnitWarning)\n return UnrecognizedUnit(unit_str)" ]
[ "0.66210455", "0.64962125", "0.6310256", "0.6243986", "0.6172801", "0.6071503", "0.6054874", "0.597073", "0.5903953", "0.58468944", "0.5775648", "0.57703346", "0.57623714", "0.5754559", "0.5732214", "0.572949", "0.5704426", "0.56998724", "0.56950045", "0.562585", "0.55660784", "0.5565496", "0.5559629", "0.5557525", "0.5493018", "0.5477547", "0.5461724", "0.5428706", "0.5401249", "0.5383214" ]
0.65280527
1
Successfully instantiate an effort field with a valid definition of time_units.
def test_fields_effort_time_units_dictionary_success(self, _mock_check):
    field = EffortField(time_units={"minute": ("minute", "minutes")})

    errors = field.check()
    self.assertEqual(len(errors), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fields_effort_time_units_required(self, _mock_check):\n field = EffortField()\n errors = field.check()\n self.assertEqual(len(errors), 1)\n error = errors[0]\n self.assertEqual(\n error.msg, \"Effort fields must define a 'time_units' attribute.\"\n )\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1010\")", "def test_fields_effort_time_units_dictionary_string(self, _mock_check):\n field = EffortField(time_units={\"minute\": \"minute\"})\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'time_units' must be a dictionary of tuples.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1011\")", "def test_fields_effort_default_effort_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"invalid\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1012\")", "def test_fields_effort_default_effort_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"minute\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_fields_effort_time_units_tuple(self, _mock_check):\n field = EffortField(time_units=((\"minute\", \"minute\"),))\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'time_units' must be a dictionary of tuples.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1011\")", "def test_time_field():", "def test_fields_effort_default_reference_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"invalid\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1013\")", "def __init__(self, value, time, time_type=\"int\", currency=\"mxn\"):\n self.time = Time(time, time_type)\n self.value = self.validate_value(value)\n self.currency = self.validate_currency(currency)", "def test_issue_add_time(self):\n pass", "def __init__(__self__, *,\n duration_hours: pulumi.Input[int],\n schedule: pulumi.Input['ScheduleArgs'],\n start_time: pulumi.Input[str],\n not_allowed_dates: Optional[pulumi.Input[Sequence[pulumi.Input['DateSpanArgs']]]] = None,\n start_date: Optional[pulumi.Input[str]] = None,\n utc_offset: Optional[pulumi.Input[str]] = None):\n if duration_hours is None:\n duration_hours = 24\n pulumi.set(__self__, \"duration_hours\", duration_hours)\n pulumi.set(__self__, \"schedule\", schedule)\n pulumi.set(__self__, \"start_time\", start_time)\n if not_allowed_dates is not None:\n pulumi.set(__self__, \"not_allowed_dates\", not_allowed_dates)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if utc_offset is not None:\n pulumi.set(__self__, \"utc_offset\", utc_offset)", "def __init__(self, planned: DutyTimes=None, actual: DutyTimes=None, shift_type: str=None):\n self.openapi_types = {\n 'planned': DutyTimes,\n 'actual': DutyTimes,\n 'shift_type': str\n }\n\n self.attribute_map = {\n 'planned': 'planned',\n 'actual': 'actual',\n 
'shift_type': 'shiftType'\n }\n\n self._planned = planned\n self._actual = actual\n self._shift_type = shift_type", "def test_fields_effort_default_reference_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"minute\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_create_missing_additional_duration(self):\n self.mockAuthToken()\n self.mockServerGetDetailsByComputers()\n self.mockTicketGetTicket()\n maint_params = {\n 'master_ticket' : '080102-00121',\n 'description' : 'do stuff',\n 'expedite' : False,\n 'billing_text' : 'send me the bill',\n #'additional_duration_minutes': '60',\n 'service_type_id' : 1,\n 'employee_contact_id' : 1,\n 'servers' : 110912\n }\n response = self.app.post(url_for(controller='/maintenances', action='create'), params=maint_params)\n self.assertEqual(response.status, 200)\n new_maint = db_sess.query(ScheduledMaintenance).filter_by(master_ticket='080102-00121').one()\n self.assertEqual(new_maint.additional_duration, timedelta(minutes=0))", "def test_time_must_be_valid(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': '11',\n 'number_players': '1',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01-01 at 13:00'\n }\n )\n self.assertTrue('Time must be a valid format' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)", "def __init__(self, metricName, timeResolutions = (86400,)):\n self.metric = metricName\n self.timeResolutions = timeResolutions", "def __init__(self, time, numerator, denominator):\n self.time = time\n self.numerator = numerator\n self.denominator = denominator", "def _setup_volunteer_hours(\n volunteer,\n npf_admin,\n org,\n project,\n datetime_start,\n datetime_end,\n description=\"Manually tracked time \",\n event_type=\"MN\",\n is_verified=False,\n action_type='req'\n):\n event = Event.objects.create(\n project=project,\n is_public=True,\n description=\"finished event\",\n location=\"test_location\",\n coordinator=npf_admin,\n event_type=event_type,\n datetime_start=datetime_start,\n datetime_end=datetime_end\n )\n\n volunteer_timelog = UserTimeLog.objects.create(\n user=volunteer,\n event=event,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n is_verified=is_verified\n )\n\n actiontimelog = AdminActionUserTime.objects.create(\n user=npf_admin,\n usertimelog=volunteer_timelog,\n action_type=action_type\n )\n\n return volunteer_timelog, actiontimelog, event", "def test_total_time_no_end_time(time_record_factory):\n d = datetime.datetime(2018, 10, 1, 15, 26)\n t = time_record_factory(time_start=d, time_end=None)\n expected = datetime.timedelta(0)\n assert t.total_time == expected", "def __init__(self, offset_hours: int) -> None:\r\n self.offset = datetime.timedelta(hours=offset_hours)", "def test_initialization_of_homework_deadline():\n expected = datetime.timedelta(days=1)\n assert oop_hw.deadline_days == expected", "def __init__(self,\n day=None,\n end_time=None,\n start_time=None,\n ):\n\n # Initialize members of the class\n self.day = day\n self.end_time = end_time\n self.start_time = start_time", "def __init__(self,\n time: Timestamp,\n duration: Duration,\n operation: ops.Operation) -> None:\n self.time = time\n self.duration = duration\n self.operation = operation", "def test_format_optional_time_field(self):\n formatted_time = jiratimereport.format_optional_time_field(99960, \"\")\n expected_result = 
\"27:46:00\"\n self.assertEqual(expected_result, formatted_time)", "def __init__(self, Date, TimeOfDay):\n self.date = Date\n self.time_of_day = TimeOfDay", "def __init__(self, date, startTime, endTime, summary, offset):\n\n self.summary = summary\n self.start = datetime.datetime.strptime(\n date + startTime, \"%B %d, %Y%I:%M %p\")\n self.end = datetime.datetime.strptime(\n date + endTime, \"%B %d, %Y%I:%M %p\")\n self.length = self.end - self.start\n self.offset = offset\n\n self.description = \"Ingen lunchrast!\"", "def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x", "def __init__(self, duration):\n self._duration = validate_duration(duration, min_duration=16,\n max_duration=4194304)", "def __init__(self, name=\"\", time=None):\n super().__init__(\"time\", name)\n self.time = time", "def test_str_time_period_hrs(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"time_period_hrs\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x29,\n 0xDE,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 10718)\n self.assertEqual(sensor.unit_of_measurement(), \"h\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_get_runtime_in_hours(self):\n \n # Create a movie object\n resource = Resource(2, \"A clockwork Orange\", [Name(\"Stanley\", \"Kubrick\")], \n \"Protagonist Alex DeLarge is an ultraviolent youth in \"\\\n \"futuristic Britain. As with all luck, his eventually runs out \"\\\n \"and he's arrested and convicted of murder and rape. While in \"\\\n \"prison, Alex learns of an experimental program in which \"\\\n \"convicts are programmed to detest violence. If he goes \"\\\n \"through the program, his sentence will be reduced and he will \"\\\n \"be back on the streets sooner than expected. But Alex's \"\\\n \"ordeals are far from over once he hits the mean streets of \"\\\n \"Britain that he had a hand in creating.\",\n \"sci-fi\", \"English\", 1971, \"US\", 136, \"movie\",\n [\"dystopia\", \"violence\", \"alternate society\"])\n \n # Assert expected result\n self.assertEqual(resource.get_runtime_in_hours(), (2, 16))" ]
[ "0.70330894", "0.6708433", "0.637528", "0.61861193", "0.6182248", "0.60161173", "0.5713787", "0.57050014", "0.57042706", "0.5659156", "0.56355757", "0.55543", "0.5543725", "0.55088395", "0.5478543", "0.53843373", "0.537652", "0.5352166", "0.5351796", "0.53372765", "0.53111565", "0.52857465", "0.5276464", "0.52602226", "0.52553505", "0.5251947", "0.52261734", "0.51935136", "0.5175785", "0.5174223" ]
0.6872836
1
Trying to instantiate an effort field with an invalid default effort unit should not pass the checks.
def test_fields_effort_default_effort_unit_invalid(self, _mock_check):
    field = EffortField(
        time_units={"minute": ("minute", "minutes")}, default_effort_unit="invalid"
    )

    errors = field.check()
    self.assertEqual(len(errors), 1)

    error = errors[0]
    self.assertEqual(error.msg, "'invalid' is not a valid time unit.")
    self.assertEqual(error.obj, field)
    self.assertEqual(error.id, "fields.E1012")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fields_effort_default_effort_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"minute\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_fields_effort_default_reference_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"invalid\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1013\")", "def test_fields_effort_time_units_required(self, _mock_check):\n field = EffortField()\n errors = field.check()\n self.assertEqual(len(errors), 1)\n error = errors[0]\n self.assertEqual(\n error.msg, \"Effort fields must define a 'time_units' attribute.\"\n )\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1010\")", "def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))", "def test_glass_capacity__has_expected_default_value():\n glass = moet.create_glass(\"A\")\n assert glass.capacity == 250", "def test_fields_effort_default_reference_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"minute\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def testDefaultFields_InvalidSingle(self):\n def action(field_class):\n self.assertRaises(messages.InvalidDefaultError,\n field_class,\n 1,\n default=object())\n self.ActionOnAllFieldClasses(action)", "def test_employee_creation_bad_fields(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=None, department_id=None)\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], 0)\n self.assertIsNone(result['departmentId'])", "def test_set_glass_capacity__with_invalid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n with pytest.raises(ValueError):\n glass.capacity = -100", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def test_default_missing_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'To enroll, you must follow the honor code.',\r\n )", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n JhmdbPCKAccuracy(norm_item='invalid')", "def test_schema_default_missing_validator_combinations(test_case):\n evaluate_test_cases([test_case])", "def test_default_zero_fields_validate(self):\r\n it = self.IntegerTest()\r\n it.validate()", "def testinvalidability(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Invalid')\n 
self.assertRaises(AbilityError, AmuletAbility, '')", "def testBadVariableCostItem(self):\n\t \n costs = pf.VariableCosts()\n self.assertRaises(pf.BadVariableExpenseItem, costs.add_variable_expense, \"ninja\")", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n PCKAccuracy(norm_item='invalid')", "def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def test_get_fields_and_lookups_invalid_lookup(self):\n with self.assertRaises(exceptions.FieldError):\n utils.get_fields_and_lookups(Protected, 'protector__date__hour')", "def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()", "def test_capacity_cannot_be_empty(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': 'hello',\n 'number_players': '1',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n )\n self.assertTrue('Capacity must be a number' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)", "def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def test_create_risk_with_empty_field(self, field):\n risk_body = self.generate_risk_body()\n risk_body[field] = None\n\n response = self.api.post(all_models.Risk, data=risk_body)\n\n self.assert400(response)", "def test_creation_incorrect_hardbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, hardbounds=[0, 10, 20])", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"uniform\", -3, 2, default_value=5)", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def test_no_project_defaults(self):\n ep = exposed.ExposedProject()\n self.assertIsNone(ep.display)\n self.assertIsNone(ep.shared)\n self.assertIsNone(ep.settings)\n self.assertIsNone(ep.title)\n self.assertIsNone(ep.id)\n self.assertIsNone(ep.path())\n\n with self.assertRaises(RuntimeError):\n ep.title = 'Some Title'", "def test_required_level_of_education_missing(self):\r\n self.url_params['level_of_education'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'A level of education is required',\r\n )", "def test_creation_notallow_none():\n with pytest.raises(ValueError) as __:\n value = None\n __ = param.Integer(value=value, allow_None=False)" ]
[ "0.6407443", "0.6119699", "0.5791611", "0.56821656", "0.56788296", "0.56445104", "0.5627635", "0.5598608", "0.5586389", "0.5577874", "0.550885", "0.54858345", "0.5473265", "0.5447214", "0.5441613", "0.5421066", "0.5402424", "0.5385762", "0.5336041", "0.5326429", "0.5322604", "0.5322464", "0.53197896", "0.5319178", "0.53138006", "0.5312047", "0.530727", "0.5300698", "0.5297031", "0.5293593" ]
0.68223494
0
Successfully instantiating an effort field with a default effort unit.
def test_fields_effort_default_effort_unit_success(self, _mock_check):
    field = EffortField(
        time_units={"minute": ("minute", "minutes")}, default_effort_unit="minute"
    )

    errors = field.check()
    self.assertEqual(len(errors), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_glass_capacity__has_expected_default_value():\n glass = moet.create_glass(\"A\")\n assert glass.capacity == 250", "def test_fields_effort_default_effort_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"invalid\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1012\")", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def test_fields_effort_default_reference_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"minute\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_give_default_raise(self):\n self.employee.give_raise()\n self.assertEqual(55000, self.employee.salary)", "def test_investigation_type_setter(self):\n pt = PrepTemplate.create(self.metadata, self.new_raw_data,\n self.test_study, self.data_type_id)\n self.assertEqual(pt.investigation_type, None)\n pt.investigation_type = \"Other\"\n self.assertEqual(pt.investigation_type, 'Other')\n with self.assertRaises(QiitaDBColumnError):\n pt.investigation_type = \"should fail\"", "def test_give_default_raise(self):\n self.employee.give_raise()\n self.assertEqual(self.employee.salary, 55000)", "def test_employee_creation_bad_fields(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=None, department_id=None)\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], 0)\n self.assertIsNone(result['departmentId'])", "def test_default_rise(self):\n self.employee.give_raise()\n self.assertEqual(self.employee.anual_salary, self.salary + 5000)", "def test_fields_effort_default_reference_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"invalid\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1013\")", "def test_give_default_raise(self):\n\t\tmy_employee = Employee('justin', 'williams', 80_000)\n\t\tmy_employee.give_raise()\n\t\tself.assertEqual(my_employee.salary, 85_000)", "def test_init(self):\r\n p = TaxonAssigner({})\r\n self.assertEqual(p.Name, 'TaxonAssigner')\r\n self.assertEqual(p.Params, {})", "def create_defect(jira_dict, issue):\n defect = deepcopy(jira_dict)\n\n if jira_dict[\"sdlc_phase\"].lower() == \"closed\":\n created_dt = datetime.datetime.strptime(defect[\"created\"], DATE_FORMAT)\n resolved_dt = datetime.datetime.strptime(defect[\"resolved\"], DATE_FORMAT)\n\n if (resolved_dt - created_dt).days == 0:\n defect[\"age\"] = 0 if (resolved_dt.month == created_dt.month and\n resolved_dt.day == created_dt.day) else 1\n else:\n timedelta = resolved_dt - created_dt\n defect[\"age\"] = int(round(float((timedelta.days*86400 + timedelta.seconds)/(86400)), 0))\n else:\n timedelta = datetime.datetime.strptime(defect[\"report_date\"], DATE_FORMAT) - 
datetime.datetime.strptime(defect[\"created\"], DATE_FORMAT)\n defect[\"age\"] = int(round(float((timedelta.days*86400 + timedelta.seconds)/(86400)), 0))\n\n return defect", "def test_new_Issue(self, requests_post, get_landowner):\n #requests_post.status_code.return_value = 200\n requests_post.json.return_value = {'features': []}\n get_landowner.return_value = 'TEST landowner'\n cat = Category(name=\"test category\")\n cat.save()\n issue = Issue(description=\"test issue\", position=Point(5, 23), category=cat)\n issue.save()\n self.assertEqual(len(Issue.objects.all()), 1)\n issue = Issue(id=666, description=\"test issue with defined id\", position=Point(5, 23), category=cat)\n issue.save()\n self.assertEqual(issue.id, 666)", "def test_auto_assign_one(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=5,\n )\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)\n self.assertIn(self.profile, shift1.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter == self.profile\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n pool_hours.hours,\n )", "def test_issue_create_issue(self):\n pass", "def test_init(self):\r\n p = BlastTaxonAssigner({})\r\n self.assertEqual(p.Name, 'BlastTaxonAssigner')\r\n # default parameters correctly initialized\r\n default_params = {'Min percent identity': 90.0,\r\n 'Max E value': 1e-30,\r\n 'Application': 'blastn/megablast'}\r\n self.assertEqual(p.Params, default_params)", "def test_give_default_raise(self):\n self.my_employee.give_raise()\n self.assertEqual(self.my_employee.money, 15000)", "def test_fields_effort_time_units_required(self, _mock_check):\n field = EffortField()\n errors = field.check()\n self.assertEqual(len(errors), 1)\n error = errors[0]\n self.assertEqual(\n error.msg, \"Effort fields must define a 'time_units' attribute.\"\n )\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1010\")", "def test_init_optional(self):\n # Now confirm that we can pass gross_income, spouse,\n # tax_treatment, and initial_year\n gross_income = Money(100000)\n person1 = Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.retirement_date,\n gross_income=gross_income,\n spouse=None, tax_treatment=self.tax_treatment)\n self.assertEqual(person1.gross_income, gross_income)\n self.assertEqual(\n # pylint: disable=no-member\n # Pylint is confused by members added by metaclass\n person1.gross_income_history,\n {self.initial_year: gross_income}\n )\n self.assertEqual(person1.tax_treatment, self.tax_treatment)\n self.assertEqual(person1.initial_year, self.initial_year)\n self.assertIsNone(person1.spouse)\n self.assertEqual(person1.accounts, set())", "def test_init(self):\r\n p = Aligner({})\r\n self.assertEqual(p.Name, 'Aligner')\r\n self.assertEqual(p.Params, {})", "def test_default_missing_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'To enroll, you must follow the honor code.',\r\n )", "def __init__(self, eid: str, name: str, hours_worked: int, hours_rate: int):\n pay.HourlyPolicy.__init__(self, hours_worked, hours_rate)\n 
super().__init__(eid, name)", "def default_usage(\n PK=1,\n UsageActualName=\"test_usage_actual_name\",\n UsageAmount=Decimal(100.0),\n RateComponent=\"test_rate_component\",\n EnergyUnit=\"test_energy_unit\",\n IntervalStart=date(2000, 1, 1),\n IntervalEnd=date(2000, 2, 1),\n):\n return Usage(\n PK=PK,\n UsageActualName=UsageActualName,\n UsageAmount=UsageAmount,\n RateComponent=RateComponent,\n EnergyUnit=EnergyUnit,\n IntervalStart=IntervalStart,\n IntervalEnd=IntervalEnd,\n )", "def __init__(self, project, issuetype):\n \n super(ClearQuestCalculator, self).__init__(project, issuetype)", "def test_auto_assign_one_overflow(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=6,\n )\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([self.profile], unfinished)\n self.assertNotIn(self.profile, shift1.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter is None\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n 0,\n )", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def pt_effort(self):\n if not self.effort:\n return \"\"\n\n (effort, effort_unit) = self.effort\n unit_letter = effort_unit[0].upper()\n return f\"PT{effort:d}{unit_letter:s}\"", "def test_give_default_raise(self):\n\t\tself.mason.give_raise() \n\t\tself.assertEqual(self.mason.salary, 15000)", "def test_init_basic(self):\n person = Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.retirement_date)\n self.assertEqual(person.name, self.name)\n self.assertEqual(person.birth_date, self.birth_date)\n self.assertEqual(person.retirement_date, self.retirement_date)\n self.assertIsInstance(person.name, str)\n self.assertIsInstance(person.birth_date, datetime)\n self.assertIsInstance(person.retirement_date, datetime)\n self.assertIsNone(person.spouse)\n self.assertIsNone(person.tax_treatment)" ]
[ "0.5566992", "0.55520004", "0.52832985", "0.5258384", "0.52392936", "0.52114844", "0.5147716", "0.51188076", "0.5114156", "0.50971746", "0.5062016", "0.5045789", "0.50389683", "0.5030574", "0.50296533", "0.5029191", "0.5028355", "0.5025197", "0.5020525", "0.50115055", "0.49750078", "0.49649653", "0.49556983", "0.49541733", "0.49528086", "0.49288684", "0.49173972", "0.49035847", "0.4890696", "0.48799348" ]
0.5687161
0
Trying to instantiate an effort field with an invalid default reference unit should not pass the checks.
def test_fields_effort_default_reference_unit_invalid(self, _mock_check):
    field = EffortField(
        time_units={"minute": ("minute", "minutes")},
        default_reference_unit="invalid",
    )

    errors = field.check()
    self.assertEqual(len(errors), 1)

    error = errors[0]
    self.assertEqual(error.msg, "'invalid' is not a valid time unit.")
    self.assertEqual(error.obj, field)
    self.assertEqual(error.id, "fields.E1013")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fields_effort_default_effort_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"invalid\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1012\")", "def test_fields_effort_default_reference_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"minute\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_fields_effort_default_effort_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"minute\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def testDefaultFields_InvalidSingle(self):\n def action(field_class):\n self.assertRaises(messages.InvalidDefaultError,\n field_class,\n 1,\n default=object())\n self.ActionOnAllFieldClasses(action)", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def test_glass_capacity__has_expected_default_value():\n glass = moet.create_glass(\"A\")\n assert glass.capacity == 250", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"uniform\", -3, 2, default_value=5)", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))", "def test_nonable_fields(declaration):\n\n if declaration == 'typing':\n from typing import Optional\n \n class Foo(object):\n a = field(type_hint=Optional[int], check_type=True)\n b = field(type_hint=Optional[int], validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n elif declaration == 'default_value':\n class Foo(object):\n a = field(type_hint=int, default=None, check_type=True)\n b = field(type_hint=int, default=None, validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n elif declaration == 'explicit_nonable':\n class Foo(object):\n a = field(type_hint=int, nonable=True, check_type=True)\n b = field(type_hint=int, nonable=True, validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n else:\n raise ValueError(declaration)\n\n f = Foo()\n f.a = None\n f.b = None\n with 
pytest.raises(NoneError):\n f.c = None\n f.d = None\n f.e = None\n assert vars(f) == {'_a': None, '_b': None, '_d': None, 'e': None}", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n JhmdbPCKAccuracy(norm_item='invalid')", "def test_field_value_default(self):\n field = ''\n ref_idx = []\n self.res=sdcal(infile=self.rawfile,calmode=self.calmode,field=field,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_default_zero_fields_validate(self):\r\n it = self.IntegerTest()\r\n it.validate()", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n PCKAccuracy(norm_item='invalid')", "def test_default_value(self):\n dim = Fidelity(\"epoch\", 1, 2)\n assert dim.default_value == 2\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.default_value == 5", "def testDefaultFields_InvalidRepeated(self):\n self.assertRaisesWithRegexpMatch(\n messages.FieldDefinitionError,\n 'Repeated fields may not have defaults',\n messages.StringField, 1, repeated=True, default=[1, 2, 3])", "def test_default_required(self):\n schema = yaml.load(self.yaml_multiple_term, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n val.validate(document)\n self.assertEqual(val.errors, {'eventDate': ['required field']})\n\n document = {'eventDate': '2018-01-01'}\n val.validate(document)\n self.assertEqual(val.errors, {})", "def test_init_with_default_value(self):\n with pytest.raises(NotImplementedError):\n Dimension(\"yolo\", \"uniform\", -3, 4, default_value=4)", "def test_schema_default_missing_validator_combinations(test_case):\n evaluate_test_cases([test_case])", "def test_fields_effort_time_units_required(self, _mock_check):\n field = EffortField()\n errors = field.check()\n self.assertEqual(len(errors), 1)\n error = errors[0]\n self.assertEqual(\n error.msg, \"Effort fields must define a 'time_units' attribute.\"\n )\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1010\")", "def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. 
No exceptions were raised.", "def testBadVariableCostItem(self):\n\t \n costs = pf.VariableCosts()\n self.assertRaises(pf.BadVariableExpenseItem, costs.add_variable_expense, \"ninja\")", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Integer(\"yolo\", \"uniform\", -3, 2, default_value=4)", "def testDefaultFields_EnumInvalidDelayedResolution(self):\n field = messages.EnumField(\n 'apitools.base.protorpclite.descriptor.FieldDescriptor.Label',\n 1,\n default=200)\n\n self.assertRaisesWithRegexpMatch(TypeError,\n 'No such value for 200 in Enum Label',\n getattr,\n field,\n 'default')", "def test_get_fields_and_lookups_invalid_lookup(self):\n with self.assertRaises(exceptions.FieldError):\n utils.get_fields_and_lookups(Protected, 'protector__date__hour')", "def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def test_set_glass_capacity__with_invalid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n with pytest.raises(ValueError):\n glass.capacity = -100", "def test_creation_incorrect_change_hardbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, hardbounds=[0, 10])\n int_a.hardbounds = [0, 10, 20]" ]
[ "0.6802078", "0.6742572", "0.65150154", "0.6235477", "0.60174453", "0.6001082", "0.5874366", "0.57094264", "0.56961966", "0.56684256", "0.56497365", "0.56440604", "0.5609837", "0.5588175", "0.5564511", "0.55639386", "0.55550784", "0.55489916", "0.55264205", "0.5523252", "0.5518091", "0.5497271", "0.54953164", "0.5495042", "0.5492536", "0.5473571", "0.5451796", "0.54482245", "0.544102", "0.5421672" ]
0.7106623
0
Successfully instantiating an effort field with a default reference unit.
def test_fields_effort_default_reference_unit_success(self, _mock_check):
    field = EffortField(
        time_units={"minute": ("minute", "minutes")},
        default_reference_unit="minute",
    )

    errors = field.check()
    self.assertEqual(len(errors), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fields_effort_default_reference_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"invalid\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1013\")", "def test_fields_effort_default_effort_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"minute\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_fields_effort_default_effort_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")}, default_effort_unit=\"invalid\"\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1012\")", "def __init__(__self__, *,\n field_ref: Optional[str] = None,\n warning: Optional[str] = None):\n if field_ref is not None:\n pulumi.set(__self__, \"field_ref\", field_ref)\n if warning is not None:\n pulumi.set(__self__, \"warning\", warning)", "def test_glass_capacity__has_expected_default_value():\n glass = moet.create_glass(\"A\")\n assert glass.capacity == 250", "def __init__(self, reference=None):\n self.reference = reference", "def default_usage(\n PK=1,\n UsageActualName=\"test_usage_actual_name\",\n UsageAmount=Decimal(100.0),\n RateComponent=\"test_rate_component\",\n EnergyUnit=\"test_energy_unit\",\n IntervalStart=date(2000, 1, 1),\n IntervalEnd=date(2000, 2, 1),\n):\n return Usage(\n PK=PK,\n UsageActualName=UsageActualName,\n UsageAmount=UsageAmount,\n RateComponent=RateComponent,\n EnergyUnit=EnergyUnit,\n IntervalStart=IntervalStart,\n IntervalEnd=IntervalEnd,\n )", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def test_field_value_default(self):\n field = ''\n ref_idx = []\n self.res=sdcal(infile=self.rawfile,calmode=self.calmode,field=field,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def __init__(__self__, *,\n field_ref: str,\n warning: str):\n pulumi.set(__self__, \"field_ref\", field_ref)\n pulumi.set(__self__, \"warning\", warning)", "def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. 
No exceptions were raised.", "def test_entities__FieldCustomization__default_value__1(address_book):\n fc = IFieldCustomization(address_book)\n assert u'Time zone' == fc.default_value(IAddressBook['time_zone'], 'label')", "def default_charge(\n PK=1,\n ChargeActualName=\"test_charge_actual_name\",\n ChargeAmount=Decimal(100.0),\n UsageUnit=\"kW\",\n ChargeUnitsUsed=Decimal(20.0),\n ChargeRatePerUnit=Decimal(5.0),\n ThirdPartyProvider=\"test_third_party_provider\",\n IsAdjustmentCharge=False,\n IntervalStart=date(2000, 1, 1),\n IntervalEnd=date(2000, 2, 1),\n):\n return Charge(\n PK=PK,\n ChargeActualName=ChargeActualName,\n ChargeAmount=ChargeAmount,\n UsageUnit=UsageUnit,\n ChargeUnitsUsed=ChargeUnitsUsed,\n ChargeRatePerUnit=ChargeRatePerUnit,\n ThirdPartyProvider=ThirdPartyProvider,\n IsAdjustmentCharge=IsAdjustmentCharge,\n IntervalStart=IntervalStart,\n IntervalEnd=IntervalEnd,\n )", "def test_init(self):\r\n p = BlastTaxonAssigner({})\r\n self.assertEqual(p.Name, 'BlastTaxonAssigner')\r\n # default parameters correctly initialized\r\n default_params = {'Min percent identity': 90.0,\r\n 'Max E value': 1e-30,\r\n 'Application': 'blastn/megablast'}\r\n self.assertEqual(p.Params, default_params)", "def testDefaultFields_None(self):\n def action(field_class):\n field_class(1, default=None)\n field_class(1, required=True, default=None)\n field_class(1, repeated=True, default=None)\n self.ActionOnAllFieldClasses(action)", "def Reference(self, default={}):\n return HEP.ReferenceHEPObject(self.data.get('reference', default))", "def _create_user_defined_field(address_book, field_type, field_value):\n field_name = FieldFactory(\n address_book, IPostalAddress, field_type, u'distance').__name__\n return PostalAddressFactory(\n UpdateablePersonFactory(address_book),\n **{field_name: field_value, 'set_as_default': True})", "def _initializeRequestField(self,field,referenceField):\n\t\tvaluesDict = referenceField.values\n\t\tfield.initialize_values(valuesDict)\n\t\t\n\t\tpass", "def __init__(self, reference_value=None, measured_value=None):\n self.reference_value = reference_value\n self.measured_value = measured_value", "def test_entities__NoFieldCustomization__default_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n assert u'Time zone' == nfc.default_value(\n IAddressBook['time_zone'], 'label')", "def __init__(self, project, issuetype):\n \n super(ClearQuestCalculator, self).__init__(project, issuetype)", "def testDefaultFields_InvalidSingle(self):\n def action(field_class):\n self.assertRaises(messages.InvalidDefaultError,\n field_class,\n 1,\n default=object())\n self.ActionOnAllFieldClasses(action)", "def __init__(self, ideal: np.ndarray, nadir: np.ndarray) -> None:\n\n self.n_objectives = len(ideal)\n self._ideal = ideal\n self._nadir = nadir\n\n msg = \"Please specify a reference point as 'reference_point'.\"\n content = {\n \"message\": msg,\n \"ideal\": ideal,\n \"nadir\": nadir,\n }\n\n super().__init__(\"reference_point_preference\", \"required\", content=content)", "def test_init__default(self):\n fact_query = FactQuery()\n self.assertIsNone(fact_query.parsed_query)", "def test_give_default_raise(self):\n self.employee.give_raise()\n self.assertEqual(55000, self.employee.salary)", "def create_defect(jira_dict, issue):\n defect = deepcopy(jira_dict)\n\n if jira_dict[\"sdlc_phase\"].lower() == \"closed\":\n created_dt = datetime.datetime.strptime(defect[\"created\"], DATE_FORMAT)\n resolved_dt = datetime.datetime.strptime(defect[\"resolved\"], DATE_FORMAT)\n\n if 
(resolved_dt - created_dt).days == 0:\n defect[\"age\"] = 0 if (resolved_dt.month == created_dt.month and\n resolved_dt.day == created_dt.day) else 1\n else:\n timedelta = resolved_dt - created_dt\n defect[\"age\"] = int(round(float((timedelta.days*86400 + timedelta.seconds)/(86400)), 0))\n else:\n timedelta = datetime.datetime.strptime(defect[\"report_date\"], DATE_FORMAT) - datetime.datetime.strptime(defect[\"created\"], DATE_FORMAT)\n defect[\"age\"] = int(round(float((timedelta.days*86400 + timedelta.seconds)/(86400)), 0))\n\n return defect", "def field_init(self, *args, **kwargs):\n humanize_func = kwargs.pop('humanized', None)\n if humanize_func:\n def humanize(val, inst, *args, **kwargs):\n return humanize_func(val, inst, field=self, *args, **kwargs)\n self.humanized = humanize\n else:\n self.humanized = self.default_humanized\n getattr(self, '_init_chamber_patch_')(*args, **kwargs)", "def defaults():\n\n #dummy = FieldTemplate.dummy\n\n return None", "def test_init_optional(self):\n # Now confirm that we can pass gross_income, spouse,\n # tax_treatment, and initial_year\n gross_income = Money(100000)\n person1 = Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.retirement_date,\n gross_income=gross_income,\n spouse=None, tax_treatment=self.tax_treatment)\n self.assertEqual(person1.gross_income, gross_income)\n self.assertEqual(\n # pylint: disable=no-member\n # Pylint is confused by members added by metaclass\n person1.gross_income_history,\n {self.initial_year: gross_income}\n )\n self.assertEqual(person1.tax_treatment, self.tax_treatment)\n self.assertEqual(person1.initial_year, self.initial_year)\n self.assertIsNone(person1.spouse)\n self.assertEqual(person1.accounts, set())", "def test_init(self):\r\n p = TaxonAssigner({})\r\n self.assertEqual(p.Name, 'TaxonAssigner')\r\n self.assertEqual(p.Params, {})" ]
[ "0.6068699", "0.57065034", "0.5490475", "0.5445494", "0.541955", "0.5358551", "0.5281326", "0.52773386", "0.5230165", "0.5155736", "0.5153045", "0.512243", "0.50940686", "0.50860965", "0.50385386", "0.50339186", "0.502558", "0.502396", "0.5011245", "0.49972126", "0.49727124", "0.49558723", "0.49539876", "0.49496463", "0.4935644", "0.49276593", "0.4917652", "0.48937604", "0.4891352", "0.48878524" ]
0.6300686
0
Ensures request is Json, session is active and the correct session key is supplied
def _ValidRequest(request):
    if not request.json:
        abort(400)
    sessId = request.json['sessionId']
    sessKey = request.json['sessionKey']
    # Check if it is active and correct key
    return database.SessionActive(sessId) and database.CorrectSessionKey(sessId, sessKey)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def view_session(request: Request):\n return JSONResponse(request.session)", "def test_new_session_created_with_auth_json_no_cookie(self):\n\n with self.app_sess1 as c:\n data = {\n \"token\": \"pretend_token\"\n }\n ret = c.post('/', data=json.dumps(data), headers={'Content-Type': 'application/json'})\n self.assertNotIn('Set-Cookie', ret.headers)", "def _process_request(self, request):\n\n request.horizon = {'dashboard': None,\n 'panel': None,\n 'async_messages': []}\n if not hasattr(request, \"user\") or not request.user.is_authenticated:\n # proceed no further if the current request is already known\n # not to be authenticated\n # it is CRITICAL to perform this check as early as possible\n # to avoid creating too many sessions\n return None\n\n # Since we know the user is present and authenticated, lets refresh the\n # session expiry if configured to do so.\n if settings.SESSION_REFRESH:\n timeout = settings.SESSION_TIMEOUT\n token_life = request.user.token.expires - datetime.datetime.now(\n pytz.utc)\n session_time = min(timeout, int(token_life.total_seconds()))\n request.session.set_expiry(session_time)\n\n if http_utils.is_ajax(request):\n # if the request is Ajax we do not want to proceed, as clients can\n # 1) create pages with constant polling, which can create race\n # conditions when a page navigation occurs\n # 2) might leave a user seemingly left logged in forever\n # 3) thrashes db backed session engines with tons of changes\n return None\n # If we use cookie-based sessions, check that the cookie size does not\n # reach the max size accepted by common web browsers.\n if (\n settings.SESSION_ENGINE ==\n 'django.contrib.sessions.backends.signed_cookies'\n ):\n max_cookie_size = settings.SESSION_COOKIE_MAX_SIZE\n session_cookie_name = settings.SESSION_COOKIE_NAME\n session_key = request.COOKIES.get(session_cookie_name)\n if max_cookie_size is not None and session_key is not None:\n cookie_size = sum((\n len(key) + len(value)\n for key, value in request.COOKIES.items()\n ))\n if cookie_size >= max_cookie_size:\n LOG.error(\n 'Total Cookie size for user_id: %(user_id)s is '\n '%(cookie_size)sB >= %(max_cookie_size)sB. 
'\n 'You need to configure file-based or database-backed '\n 'sessions instead of cookie-based sessions: '\n 'https://docs.openstack.org/horizon/latest/'\n 'admin/sessions.html',\n {\n 'user_id': request.session.get(\n 'user_id', 'Unknown'),\n 'cookie_size': cookie_size,\n 'max_cookie_size': max_cookie_size,\n }\n )\n\n tz = utils.get_timezone(request)\n if tz:\n timezone.activate(tz)", "def session(self, request):\n if request.method != 'GET':\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n data = {'valid': request.user.is_authenticated()}\n return Response(data, status=status.HTTP_200_OK)", "def test_new_session_create_with_auth_json(self):\n\n with self.app_sess1 as c:\n data = {\n \"token\": \"pretend_token\"\n }\n ret1 = c.post('/', data=json.dumps(data), headers={'Content-Type': 'application/json'})\n ret2 = c.get('/', headers={'X-Auth-Token': 'pretend_token'})\n\n self.assertEqual(ret1.data, ret2.data)", "def _check_session_valid(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason=\"Access denied!\")\n\n if \"analytics\" not in request.session:\n err = \"Could not fetch analytic session data.\"\n return HttpResponseBadRequest(reason=err)\n\n return None", "def get_session_params(request):\n\n json_resp = {}\n usecase = request.session.get('usecase',None)\n language = request.session.get('language',None)\n institute = request.session.get('institute',None)\n annotation = request.session.get('mode',None)\n team_member = request.session.get('team_member',None)\n report_type = request.session.get('report_type',None)\n batch = request.session.get('batch',None)\n\n if batch is not None and report_type is not None and usecase is not None and language is not None and institute is not None and annotation is not None:\n json_resp['usecase'] = usecase\n json_resp['language'] = language\n json_resp['institute'] = institute\n json_resp['team_member'] = team_member\n json_resp['report_type'] = report_type\n json_resp['batch'] = batch\n if annotation == 'Human':\n json_resp['annotation'] = 'Manual'\n elif annotation == 'Robot':\n json_resp['annotation'] = 'Automatic'\n else:\n json_resp['usecase'] = ''\n json_resp['language'] = ''\n json_resp['institute'] = ''\n json_resp['batch'] = ''\n if User.objects.filter(profile='Admin').exists():\n admin = User.objects.filter(profile='Admin')\n admin = admin.first()\n admin_name = admin.username\n json_resp['team_member'] = admin_name\n else:\n json_resp['team_member'] = 'Test'\n json_resp['annotation'] = ''\n json_resp['report_type'] = ''\n return JsonResponse(json_resp)", "def handle_session_mgt(request):\n db = cloud_db.DbManager()\n try:\n if request.method == 'POST':\n ### login(signin) ###\n if len(request.body) == 0:\n raise Exception(MSG_NODATA)\n data = json.loads(request.body)\n\n if not data.get('user_id') or not data.get('password'):\n raise Exception(MSG_INVALID_PARAMS)\n if request.session.get('user'):\n raise Exception(MSG_ALREADY_LOGGEDIN)\n user_id = data['user_id']\n password = data['password']\n\n authenticated = db.check_authentication(user_id)\n if authenticated is None:\n raise Exception(MSG_UNKNOWN_ERROR)\n elif authenticated is False:\n return JsonResponse(constants.CODE_NEED_AUTH)\n\n user = {}\n intpr_session = {}\n user_type = db.retrieve_user_type(user_id, password)\n if user_type is None:\n raise Exception(MSG_INVALID_IDPW)\n elif user_type == 'patient':\n user = db.retrieve_patient(user_id, password)\n intpr_session['sessions'] = db.retrieve_patient_session(data['user_id'])\n 
elif user_type == 'physician':\n user = db.retrieve_physician(user_id, password)\n intpr_session['sessions'] = db.retrieve_physician_session(data['user_id'])\n else:\n raise Exception(MSG_INVALID_IDPW)\n if not user.get('user_id'):\n raise Exception(MSG_INVALID_IDPW)\n\n # intpr session check\n new_flag = 0\n for session in intpr_session['sessions']:\n if session['status'] == 0:\n new_flag = 1\n intpr_session['new'] = new_flag\n\n # set sessions\n request.session['user'] = user\n # pprint(user)\n request.session['intpr_session'] = intpr_session\n # request.session['medical_image'] = {}\n # request.session.create('medical_image')\n logger.info('user %s logged in.' % user['user_id'])\n\n return JsonResponse(constants.CODE_SUCCESS)\n\n elif request.method == 'DELETE':\n ### Logout ###\n if request.session.get('user'):\n # del request.session['user']\n request.session.clear()\n return JsonResponse(constants.CODE_SUCCESS)\n else:\n raise Exception(MSG_NO_USER_LOGGEDIN)\n\n except Exception as e:\n logger.exception(e)\n return JsonResponse(dict(constants.CODE_FAILURE, **{'msg': str(e)}))\n\n return JsonResponse(dict(constants.CODE_FAILURE, **{'msg': MSG_UNKNOWN_ERROR}))", "def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover", "async def tus_check_session(request: web.Request) -> web.Response:\n ctx: Context = request.app[\"ctx\"]\n secret = ctx.local_config[\"storage-proxy\"][\"secret\"]\n async with check_params(\n request,\n t.Dict(\n {\n t.Key(\"token\"): tx.JsonWebToken(\n secret=secret, inner_iv=upload_token_data_iv\n ),\n }\n ),\n read_from=CheckParamSource.QUERY,\n ) as params:\n token_data = params[\"token\"]\n async with ctx.get_volume(token_data[\"volume\"]) as volume:\n headers = await prepare_tus_session_headers(request, token_data, volume)\n return web.Response(headers=headers)", "def check_session(wrapped):\n\n def check(request, *arg, **kwargs):\n collection = request.GET.get('collection', None)\n journal = request.GET.get('journal', None)\n document = request.GET.get('document', None)\n range_start = request.GET.get('range_start', None)\n under_development = request.GET.get('under_development', None)\n range_end = request.GET.get('range_end', None)\n py_range = request.GET.get('py_range', None)\n sa_scope = sorted([v for k, v in request.GET.items() if k == 'sa_scope'])\n la_scope = sorted([v for k, v in request.GET.items() if k == 'la_scope'])\n locale = request.GET.get('_LOCALE_', request.locale_name)\n\n if journal == 'clean' and 'journal' in request.session:\n del request.session['journal']\n document = None\n journal = None\n if 'document' in request.session:\n del request.session['document']\n document = None\n\n if document == 'clean' and 'document' in request.session:\n del request.session['document']\n document = None\n\n session_under_development = request.session.get('under_development', None)\n session_collection = request.session.get('collection', None)\n session_journal = request.session.get('journal', None)\n session_document = request.session.get('document', None)\n session_range_start = request.session.get('range_start', None)\n session_range_end = request.session.get('range_end', None)\n session_py_range = request.session.get('py_range', None)\n session_sa_scope = sorted(request.session.get('sa_scope', []))\n session_la_scope = 
sorted(request.session.get('la_scope', []))\n session_locale = request.session.get('_LOCALE_', None)\n\n if collection and collection != session_collection:\n request.session['collection'] = collection\n if 'journal' in request.session:\n del request.session['journal']\n elif not session_collection:\n request.session['collection'] = 'scl'\n\n if under_development and under_development != session_under_development:\n request.session['under_development'] = under_development\n\n if journal and journal != session_journal:\n request.session['journal'] = journal\n\n if document and document != session_document:\n request.session['document'] = document\n request.session['journal'] = document[1:10]\n\n if range_start and range_start != session_range_start:\n request.session['range_start'] = range_start\n\n if range_end and range_end != session_range_end:\n request.session['range_end'] = range_end\n\n if py_range and py_range != session_py_range:\n request.session['py_range'] = py_range\n\n if sa_scope and sorted(sa_scope) != sorted(session_sa_scope):\n request.session['sa_scope'] = sorted(sa_scope)\n\n if la_scope and sorted(la_scope) != sorted(session_la_scope):\n request.session['la_scope'] = sorted(la_scope)\n\n if locale and locale != session_locale:\n request.session['_LOCALE_'] = locale\n\n return wrapped(request, *arg, **kwargs)\n\n check.__doc__ = wrapped.__doc__\n\n return check", "def get_session(session_id):\n response_dict = {}\n if request.method == 'POST' and request.json:\n # First Time creation\n # with or without json data\n # session_id = request.json.get('session_id')\n if not session_id:\n return return_response({\"message\": \"Something is missing, \"\n \"read the API docs for \"\n \"more information.\"}, 403)\n if is_active_session(session_id):\n return return_response({\"message\": \"Conflict, ID already exists. 
Use PUT instead of POST.\"}, 409)\n if request.json:\n update_or_create_session(session_id=session_id, data=request.json.get('data'))\n response_dict['ok'] = True\n elif request.method == 'PUT' and request.json:\n # Updating information in session\n if not session_id:\n return return_response({\"message\": \"Something is missing, \"\n \"read the API docs for \"\n \"more information.\"}, 403)\n if request.json:\n update_or_create_session(session_id=session_id, data=request.json.get('data'))\n response_dict['ok'] = True\n elif request.method == 'GET':\n # Getting information for a session_id or get new random session_id\n if session_id is None:\n response_dict['session_id'] = generate_random_session_id()\n else:\n data = get_session_data(session_id=session_id)\n if data is not None:\n response_dict = {'data': data, 'ok': True}\n else:\n return return_response({\"message\": \"ID does not exists\"}, 404)\n else:\n pass\n\n return return_response(response_dict)", "def parse(self, body):\n if isinstance(body, six.string_types):\n body = json.loads(body)\n\n # version\n version = body['version']\n self.version = version\n\n # session\n session = body['session']\n self.session.new = session['new']\n self.session.session_id = session['sessionId']\n application_id = session['application']['applicationId']\n self.session.application.application_id = application_id\n if 'attributes' in session and session['attributes']:\n self.session.attributes = session.get('attributes', {})\n else:\n self.session.attributes = {}\n self.session.user.user_id = session['user']['userId']\n self.session.user.access_token = session['user'].get('accessToken', 0)\n\n # request\n request = body['request']\n\n # launch request\n if request['type'] == 'LaunchRequest':\n self.request = LaunchRequest()\n\n # intent request\n elif request['type'] == 'IntentRequest':\n self.request = IntentRequest()\n self.request.intent = Intent()\n intent = request['intent']\n self.request.intent.name = intent['name']\n if 'slots' in intent and intent['slots']:\n for name, slot in six.iteritems(intent['slots']):\n self.request.intent.slots[name] = Slot()\n self.request.intent.slots[name].name = slot['name']\n self.request.intent.slots[name].value = slot.get('value')\n\n # session ended request\n elif request['type'] == 'SessionEndedRequest':\n self.request = SessionEndedRequest()\n self.request.reason = request['reason']\n\n # common - keep after specific requests to prevent param overwrite\n self.request.type = request['type']\n self.request.request_id = request['requestId']\n self.request.timestamp = request['timestamp']\n\n return self", "def sessionCheck(session, req):\n\tlog(\"sessionCheck called\", session, req)\n\tif config.AUTH_TYPE=='NONE':\n\t\tlog(\"sessionCheck passed\", session, req)\n\t\tpass\n\telif config.AUTH_TYPE=='HTTP':\n\t\tif req.user is None:\n\t\t\tlog(\"sessionCheck failed\", session, req)\n\t\t\traise Exception(\"HTTP authentication misconfiguration (req.user is None)\")\n\t\telse:\n\t\t\tlog(\"sessionCheck passed\", session, req)\n\telif config.AUTH_TYPE=='FORM':\n\t\tif session.is_new() or not session.has_key('username'):\n\t\t\tlog(\"sessionCheck failed\", session, req)\n\t\t\ttry:\n\t\t\t\tutil.redirect(req, 'login.psp?redirect=%s' % urllib.quote_plus(req.unparsed_uri))\n\t\t\texcept apache.SERVER_RETURN: #fix for pre-3.3.1 bug where it uses apache.OK instead of apache.DONE (https://issues.apache.org/jira/browse/MODPYTHON-140)\n\t\t\t\traise apache.SERVER_RETURN, apache.DONE\n\t\telse:\n\t\t\tlog(\"sessionCheck 
passed\", session, req)\n\telse:\n\t\traise Exception(\"sanity check\")", "def setsession(self, required):\n if self.__httprequest.POST:\n postdata = dict(self.__httprequest.POST.dict())\n #check if its LTI call\n if postdata.get(\"lti_message_type\") == \"basic-lti-launch-request\":\n if self.check_oauth():\n self.__httprequest.session[\"lti_validsession\"] = True\n self.__httprequest.session[\"lti_user_id\"] = postdata.get(\"user_id\")\n self.__httprequest.session[\"lti_role\"] = postdata.get(\"roles\")\n else:\n raise Http403(\"BAD OAUTH DATA\")\n elif self.is_valid() is not True and required:\n raise Http403(\"NOT AN LTI CALL AND LTI IS REQUIRED\")", "async def test_validate_session(api_client: TestClient, coresys: CoreSys):\n with patch(\"aiohttp.web_request.BaseRequest.__getitem__\", return_value=None):\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": \"non-existing\"},\n )\n assert resp.status == 401\n\n with patch(\n \"aiohttp.web_request.BaseRequest.__getitem__\",\n return_value=coresys.homeassistant,\n ):\n resp = await api_client.post(\"/ingress/session\")\n result = await resp.json()\n\n assert \"session\" in result[\"data\"]\n session = result[\"data\"][\"session\"]\n assert session in coresys.ingress.sessions\n\n valid_time = coresys.ingress.sessions[session]\n\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": session},\n )\n assert resp.status == 200\n assert await resp.json() == {\"result\": \"ok\", \"data\": {}}\n\n assert coresys.ingress.sessions[session] > valid_time", "def request_session(self):\n if not hasattr(self, \"_request_session\"):\n rqsid = self.shared_vars.pop(\"rqsid\", \"\")\n rqses = self.request_session_manager.pop_request_session(rqsid)\n\n if not rqses:\n if self.is_action():\n del session['VDOM_API_SESSIONS']\n raise RequestSessionDoesntExist\n\n rqses = self.request_session_manager.create_request_session()\n\n else:\n uuid = rqses[\"rqsid_uuid\"]\n if not self.verify_request_session_key(rqsid, uuid):\n del session['VDOM_API_SESSIONS']\n raise RequestSessionInvalidKey\n\n self._request_session = rqses\n\n return self._request_session", "def start_session(self, user):\r\n del user[\"password\"], user[\"_id\"]\r\n session[\"logged_in\"] = True\r\n session[\"user\"] = user\r\n return jsonify(user), 200", "def is_session_applicable(self, request, response) -> bool:\n return response.content_type == \"text/html\"", "async def session(self, request):\n body = await api_validate(SCHEMA_SESSION, request)\n self._check_password(body)\n\n # check TOTP\n if self.config.security_totp:\n totp = pyotp.TOTP(self.config.security_totp)\n if body[ATTR_TOTP] != totp.now():\n raise RuntimeError(\"Invalid TOTP token!\")\n\n # create session\n valid_until = datetime.now() + timedelta(days=1)\n session = hashlib.sha256(os.urandom(54)).hexdigest()\n\n # store session\n self.config.add_security_session(session, valid_until)\n return {ATTR_SESSION: session}", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n session['attributes'] = {\"currentQuestion\":0, \"score\":0, \"date\":datetime.datetime.now().strftime(\"%B-%d-%Y %I:%M%p\"), \"billNo\":\"\", \"age\":\"\", \"result\":[]}", "def __init__(self, session):\n if not (getattr(session, \"token\", None) and isinstance(session.token, dict)):\n raise exceptions.InvalidUsageError(\"Session object is not valid\")\n 
self._session = session", "def require_session(handler: _HandlerWithSession) -> Handler:\n\n @functools.wraps(handler)\n async def decorated(request: web.Request) -> web.Response:\n request_session_token = request.match_info[\"session\"]\n session = session_from_request(request)\n if not session or request_session_token != session.token:\n LOG.warning(f\"request for invalid session {request_session_token}\")\n return web.json_response(\n data={\n \"error\": \"bad-token\",\n \"message\": f\"No such session {request_session_token}\",\n },\n status=404,\n )\n return await handler(request, session)\n\n return decorated", "def _initialize_session(self):\n session = requests.Session()\n session.auth = (self.login, self.password)\n session.verify = False\n session.headers.update({'Accept': 'application/json'})\n session.headers.update({'Content-type': 'application/json'})\n return session", "def req_session():\n request = Request()\n session = PoorSession(request.secret_key)\n session.data['test'] = True\n session.write()\n request.cookies = session.cookie\n return request", "def __init__(self, url, username, password):\n self.session = requests.session()\n self.session.auth = (username, password)\n self.session.headers.update({\n 'Accept': JSON_CONTENT_TYPE,\n })\n self.url = url", "def test_write_load(self, req_session):\n session = PoorSession(SECRET_KEY)\n session.load(req_session.cookies)\n assert session.data == {'test': True}", "def _createSessionObject(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = user.email()\n # Get the conference entity\n conf = _getEntityByWebsafeKey(request.websafeConferenceKey,\n 'Conference')\n # Ensure that the current user is the conference organizer\n if user_id != conf.organizerUserId:\n raise endpoints.UnauthorizedException(\n 'Only the conference organizer can create a new session')\n # Verify that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Ensure that the user submitted the required name property\n if not request.name:\n raise endpoints.BadRequestException(\n \"Session 'name' field required\")\n # Copy SessionForm/ProtoRPC Message into dict\n data = {\n field.name: getattr(request, field.name) for field in\n request.all_fields()\n }\n # Remove data that isn't destined for the Session entity\n del data['websafeConferenceKey']\n del data['websafeSpeakerKey']\n del data['websafeKey']\n # Add default values for those missing in the data model\n for df in SESSION_DEFAULTS:\n if data[df] in (None, []):\n data[df] = SESSION_DEFAULTS[df]\n # Ensure the string version of typeOfSession is what is stored\n # in the NDB model\n data['typeOfSession'] = str(data['typeOfSession'])\n # Convert date from string to Date object\n if data['date'] is not None:\n try:\n data['date'] = datetime.strptime(\n data['date'][:10], '%Y-%m-%d').date()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'date' value\")\n # Convert startTime from string to Time object\n if data['startTime'] is not None:\n try:\n data['startTime'] = datetime.strptime(\n data['startTime'], '%H:%M').time()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'startTime' value\")\n # Create Session\n session = Session(**data)\n session.conference = conf.key\n session.speaker = speaker.key\n session.put()\n # Add the session key to the speaker's sessions list\n speaker.sessions.append(session.key)\n 
speaker.put()\n # Add a task to task queue which checks if the speaker of this session\n # should be the new featured speaker\n taskqueue.add(params={'websafeSpeakerKey': request.websafeSpeakerKey,\n 'websafeConferenceKey': request.websafeConferenceKey},\n url='/tasks/update_featured_speaker'\n )\n # Return SessionForm object\n return self._copySessionToForm(session)", "def on_session_started(session_started_request, session):\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def load_json(self):\n try:\n self.request.arguments = json.loads(self.request.body)\n except ValueError:\n msg = \"Could not decode JSON: %s\" % self.request.body\n logger.debug(msg)\n raise tornado.web.HTTPError(400, msg)" ]
[ "0.6244704", "0.6231964", "0.6157186", "0.6138115", "0.6120776", "0.5998519", "0.5970557", "0.59146553", "0.58658266", "0.58612305", "0.5842166", "0.5771671", "0.5743049", "0.5724865", "0.5712974", "0.5704158", "0.56624156", "0.56435186", "0.563466", "0.5626631", "0.56000286", "0.5596796", "0.55784076", "0.55605364", "0.5554759", "0.55443645", "0.55133426", "0.5512088", "0.5446473", "0.54412115" ]
0.7161011
0
returns artist email if it is unique
def get_artist_email():
    artist_email = input('Please enter artist\'s email: ')
    while controls_utils.artist_email_not_unique(artist_email):
        print('Artist\'s email must be unique. ')
        artist_email = input('Please enter artist\'s email: ')
    return artist_email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_artist(self):\n artists = FileRecord.query(FileRecord.artist).distinct().filter(\n FileRecord.album == self.name).filter(\n FileRecord.year == self.year).all()\n\n if len(artists) > 1:\n return \"Various Artists\"\n elif len(artists) == 1:\n return artists[0][0]\n else:\n return \"(Unknown)\"", "def unique_email(cls, email):\n user_db = User.get_by('email', email)\n if user_db:\n raise ValueError('Sorry, this email is already taken.')\n return email", "def verification_unique_email(value: str) -> str:\n\n user = User.objects.filter(email=value)\n if len(user) == 0:\n return value\n else:\n raise serializers.ValidationError('User with given credentials already exist')", "def get_unique_artist(user):\n seen_ids = user.opinion_set.values_list(\"artist_id\", flat=True)\n return Artist.objects.exclude(id__in=seen_ids).order_by('?').first()", "def media_artist(self):\n if self._current_item[\"artistName\"] is not None:\n if self._current_item[\"year\"] is not None:\n return self._current_item[\"artistName\"] + \", \" + self._current_item[\"year\"]\n else:\n return self._current_item[\"artistName\"]\n elif self._current_item[\"author\"] is not None:\n if self._current_item[\"year\"] is not None:\n return self._current_item[\"author\"] + \", \" + self._current_item[\"year\"]\n else:\n return self._current_item[\"author\"]\n elif self._current_item[\"year\"] is not None:\n return \"Unknown, \" + str(self._current_item[\"year\"])\n return \"\"", "def get_salutation(email):\n return email.split(\"@\")[0].replace(\".\", \" \").title()", "def get_author_email(author, email):\n return encode_email(email, author, 'nav')", "def generate_email(name: str, email: str, archive_long_name: str):\n if email:\n return email.strip()\n else:\n user = name.title() + archive_long_name.title()\n return re.sub(r'\\W+', '', unidecode(user)) + '[email protected]'", "def clean_email(self):\r\n email = self.cleaned_data.get(\"email\")\r\n\r\n if not email: \r\n return email\r\n\r\n if User.objects.filter(email__iexact=email).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That e-mail is already used.\")\r\n else:\r\n return email", "def test_duplicate_email(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.assertIn(b'Sorry email already exist', rv.data)", "def _add_artist(self, artist):\n\n insert_artist = 'INSERT INTO artists (name, email, artist_id) VALUES (?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n res = conn.execute(insert_artist, (artist.name, artist.email, artist.artist_id))\n new_id = res.lastrowid # Get the ID of the new row in the table\n artist.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError:\n print(f'\\nError - Artist with that email is already in the database.\\n')\n return False", "def is_duplicate_email(email):\n users = User.objects.filter(email=email).values()\n if len(users):\n return True\n return False", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = User.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def artist(self) -> str:\n return self._artist", "def ldap_get_email(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n alias = result.get(\"alias\")[1]\n return alias\n\n return None", "def 
clean_email(self):\n existing = User.objects.filter(email__iexact=self.cleaned_data['email'])\n if existing.exists():\n raise forms.ValidationError(_(\"This email address is already in use. Please enter a different email \"\n \"address!\"))\n else:\n return self.cleaned_data['email']", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def nickname(self):\n if (self.__email and self.__auth_domain and\n self.__email.endswith('@' + self.__auth_domain)):\n suffix_len = len(self.__auth_domain) + 1\n return self.__email[:-suffix_len]\n else:\n return self.__email", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.')\n )", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = JOSReservation.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def unique_registered_email(value):\n from .models import Person\n if Person.objects.filter(email=value).exists():\n raise ValidationError(_('Email already registered.'))", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def get_artist_info(self, artist):\n self.clear()\n ai = self._artist_info(artist)\n if ai != 0:\n return ai\n else:\n if 'the ' in artist:\n ai = self._artist_info(artist.replace('the ', ''))\n else:\n ai = self._artist_info('the ' + artist)\n if ai == 0:\n if 'junior' in artist:\n ai = self._artist_info(artist.replace('junior', 'jr.'))\n if ai == 0:\n if '-' in artist:\n ai = self._artist_info(artist.replace('-', ' '))\n return ai", "def get_artist(self) -> Optional[str]:\n return self.artist", "def test_email_not_unique(bot):\n expect_error(register, InputError, \"a\", \"abcdef\", \"a\", \"a\", bot.email)", "def displayname(self):\n return self.email", "def _complete_email(name):\n if '@' not in name:\n return name + '@chromium.org'\n return name" ]
[ "0.66312927", "0.6207215", "0.61675626", "0.6128956", "0.612181", "0.6059138", "0.6052142", "0.6011615", "0.60024756", "0.59820944", "0.5937926", "0.5915206", "0.58750767", "0.5872202", "0.58694685", "0.5866588", "0.58505774", "0.5826908", "0.58094186", "0.57954603", "0.5794705", "0.5790881", "0.5790881", "0.5790881", "0.5790881", "0.57873553", "0.57726556", "0.57442987", "0.5742965", "0.57400495" ]
0.76967454
0
returns artwork name if it is unique for creating new records
def get_new_artwork_name():
    artwork_name = input('Please enter title of artwork: ')
    while not controls_utils.artwork_name_is_unique(artwork_name):
        print('Artwork name is taken')
        artwork_name = input('Please enter title of artwork: ')
    return artwork_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n if not controls_utils.artwork_name_is_unique(artwork_name):\n return artwork_name\n else:\n print('artwork not found')", "def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False", "def test_existing_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.reddit_id == 't3_ahal9v').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertTrue(file.endswith(' - 2'), msg='Failed to increment duplicate post!')", "def _add_artwork(self, artwork):\n\n insert_artwork = 'INSERT INTO artworks (artwork, price, artist_id, for_sale) VALUES (?, ?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n conn.execute(insert_artwork, (artwork.artwork, artwork.price, artwork.artist_id, artwork.for_sale))\n # new_id = res.lastrowid # Get the ID of the new row in the table\n # artwork.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError as e:\n print(f'\\nError - Artwork with that name is already in the database.\\n', e)\n return False", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def _create_unique_turtle_name(self):\n\n\t\tself._id_counter += 1\n\t\tnew_name = \"turtle{}\".format(self._id_counter)\n\n\t\tif self._has_turtle(new_name):\n\t\t\treturn self._create_unique_turtle_name()\n\n\t\treturn new_name", "def try_create_uniqe_title(self,title,plan_id):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,plan_id):\n return new_title\n return False\n else:\n return False", "def make_record_name(presentation):\r\n tags = [\r\n make_shortname(presentation.event),\r\n make_shortname(presentation.room),\r\n make_shortname(presentation.speaker),\r\n make_shortname(presentation.title),\r\n ]\r\n record_name = unicode('-'.join(tag for tag in tags if tag))\r\n\r\n # Convert unicode filenames to their equivalent ascii so that\r\n # we don't run into issues with gstreamer or filesystems.\r\n safe_record_name = unicodedata.normalize('NFKD', record_name).encode('ascii', 'ignore')\r\n\r\n return safe_record_name or 'default'", "def _get_artist(self):\n artists = FileRecord.query(FileRecord.artist).distinct().filter(\n FileRecord.album == self.name).filter(\n FileRecord.year == self.year).all()\n\n if len(artists) > 1:\n return \"Various Artists\"\n elif len(artists) == 1:\n return artists[0][0]\n else:\n return \"(Unknown)\"", "def make_qualification(self, qualification_name: str) -> str:\n if qualification_name == \"\":\n raise MephistoDBException(\"Empty string is not a valid qualification name\")\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"INSERT INTO qualifications(qualification_name) VALUES (?);\",\n (qualification_name,),\n )\n qualification_id = str(c.lastrowid)\n return qualification_id\n 
except sqlite3.IntegrityError as e:\n if is_unique_failure(e):\n raise EntryAlreadyExistsException()\n raise MephistoDBException(e)", "def add_new_artwork():\n artist_name = get_artist_name()\n if not controls_utils.artist_already_in_db(artist_name):\n print('Artist not registered, creating new registration. ')\n email = get_artist_email()\n new_artist = Artist(artist_name, email)\n artwork_db.add_artist(new_artist)\n artwork_name = get_new_artwork_name()\n price = get_price()\n available = True\n new_artwork = Artwork(artist_name, artwork_name, price, available)\n artwork_db.add_artwork(new_artwork)", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "def add_new_artist(artist_name):\n if controls_utils.artist_already_in_db(artist_name):\n print('This artist is already in database')\n\n else:\n artist_email = get_artist_email()\n new_artist = Artist(artist_name, artist_email)\n artwork_db.add_artist(new_artist)", "def try_create_uniqe_name(self,name=None,plan_id=None):\n if self.valid_name(name):\n for i in range (1,20):\n new_name=name+\"_\"+str(i)\n if self.unique_name(name=new_name,plan_id=plan_id):\n return new_name\n return False\n else:\n return False", "def artwork_pregenerator(request, elements, kw):\n artwork = kw.pop('artwork')\n kw['id'] = artwork.id\n # n.b.: this won't hurt anything if the route doesn't have {title}, so it's\n # calculated and thrown away. bad?\n if artwork.title:\n kw['title'] = '-' + _make_url_friendly(artwork.title)\n else:\n kw['title'] = ''\n\n return elements, kw", "def get_thumbnail_name(self, thumbnail_name, with_size=None):", "def name(self, obj, index=0):\n if hasattr(obj, 'id'):\n uid = obj.id.replace('material', 'm')\n else:\n self._namecount += 1\n uid = 'Untitled.' 
+ str(self._namecount)\n base = '%s-%d' % (uid, index)\n if base not in self._names:\n self._namecount += 1\n self._names[base] = '%s-%.4d' % (base[:MAX_NAME_LENGTH], self._namecount)\n return self._names[base]", "def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)", "def unique_together():\n # we'll add a suffix because we can't assume the index to be clean\n suffix = uuid4().hex\n\n click.secho('*** Creating the first Movie of a series...', fg='green')\n gf1 = _make_document(\n 'movie',\n title='The Godfather',\n series_title='The Godfather Trilogy - %s' % suffix,\n series_part=1\n )\n click.secho(json.dumps(gf1, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating the second Movie of a series...', fg='green')\n gf2 = _make_document(\n 'movie',\n title='The Godfather Part II',\n series_title='The Godfather Trilogy - %s' % suffix,\n series_part=2\n )\n click.secho(json.dumps(gf2, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Trying to create the third Movie with duplicated series_part...', fg='green')\n try:\n gf3 = _make_document(\n 'movie',\n title='The Godfather Part III',\n series_title='The Godfather Trilogy - %s' % suffix,\n series_part=2\n )\n except requests.HTTPError as e:\n click.secho(str(e), fg='red')\n click.secho(json.dumps(e.response.json(), indent=2, sort_keys=True), fg='yellow')\n else:\n click.secho(json.dumps(gf3, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Trying to create the third Movie with correct series_part...', fg='green')\n gf3 = _make_document(\n 'movie',\n title='The Godfather Part III',\n series_title='The Godfather Trilogy - %s' % suffix,\n series_part=3\n )\n click.secho(json.dumps(gf3, indent=2, sort_keys=True), fg='yellow')", "def test_get_slug_same_name_condition(self):\n\n book2 = Book.objects.create(\n category=self.category,\n language=self.language,\n level=self.level,\n name=\"Kitap denemesi\",\n description=\n \"Kitap açıklama denemesi aynı isimli kitap slug testi için\",\n author=self.author)\n test_slug = slugify(book2.name.replace(\"ı\", \"i\")) + \"-1\"\n self.assertEqual(test_slug, book2.slug)", "def testFilename(self):\n\t\tlala='test'\n\t\ta_ima = Picture(title=lala)\n\t\ta_nima = Picture()\n\n\t\tself.assertIs(a_ima.get_image_filename(), lala)\n\t\tself.assertIs(a_nima.get_image_filename(), '')", "def test_album_filename(self):\n\t\ttp = self.sess.query(sql.Post).join(sql.URL).filter(sql.Post.reddit_id == 't3_98crc8').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1000)\n\t\tself.assertEqual('aww/album - (testuser2)/0001', file, msg='Failed to generate new Album foldername!')\n\n\t\tnp = self.sess.query(sql.Post).join(sql.URL).filter(sql.Post.reddit_id == 't3_awyf90').first()\n\t\tfile = ng.choose_file_name(np.urls[0], np, sql.session(), album_size=1)\n\t\tself.assertEqual('aww/album - (testuser2) - 2/1', file, msg='Failed to create separate album folder!')", "def mosaic_art_file_name(target_im):\n target_file_name = extract_file_name(target_im)\n now_dt = now_datetime()\n return '{0}_mosaic_{1}.png'.format(target_file_name, now_dt)", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', 
new_name)\n return new_name", "def _create_key(item, duplicate_sources):\n if item[\"nom\"] not in duplicate_sources:\n return item[\"nom\"]\n dateref = item[\"date_ref\"]\n year = re.search(r\"\\d{4}\", dateref).group(0)\n return f\"{item['nom']}_{year}\"", "def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")", "def check_if_already_exists(list_name, title, description):\n\n for item in list_name:\n if item['title'] == title:\n return 'Sorry, This title has already been used in another question'\n if item['description'] == description:\n return 'Sorry, This description has already been used in another question'", "def unique_filename(data):\n file = data\n get_ext = file.filename.split(\".\")[-1]\n new_name = \"%s.%s\" % (uuid.uuid4().hex, get_ext)\n return new_name", "def test_auto_unique_slug(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\")\n self.assertEqual(story.slug, \"test-story\")\n story2 = create_story(title=\"Test Story\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\")\n self.assertEqual(story2.slug, \"test-story-2\")\n self.assertEqual(Story.objects.filter(slug=\"test-story\").count(), 1)", "def test_name_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'name': 'clear', 'units': 'mag',\n 'description': 'Un-filtered apparent magnitude.'})" ]
[ "0.73656523", "0.6096115", "0.6094721", "0.60884243", "0.5944674", "0.58501154", "0.58242446", "0.5798604", "0.56803983", "0.56430686", "0.56228995", "0.5541038", "0.5530707", "0.5527085", "0.5525762", "0.551446", "0.55008", "0.54562396", "0.5454412", "0.5428754", "0.54002196", "0.53752875", "0.5374978", "0.5364586", "0.533746", "0.533507", "0.5321743", "0.5319636", "0.53143585", "0.5305907" ]
0.7228978
1
returns artwork name already in db for accessing artwork for functions
def get_artwork_name():
    artwork_name = input('Please enter title of artwork: ')
    if not controls_utils.artwork_name_is_unique(artwork_name):
        return artwork_name
    else:
        print('artwork not found')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def artwork_id(self) -> str:\n return self.relay(\"artwork_id\")", "def get_new_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n while not controls_utils.artwork_name_is_unique(artwork_name):\n print('Artwork name is taken')\n artwork_name = input('Please enter title of artwork: ')\n return artwork_name", "def getArtistsofArtwork(catalog, codes):\n return model.getArtistname(catalog,codes)", "def create_artwork(**kwargs):\n artwork = Artwork(**kwargs)\n db.session.add(artwork)\n db.session.commit()\n db.session.flush()\n\n return artwork", "def _add_artwork(self, artwork):\n\n insert_artwork = 'INSERT INTO artworks (artwork, price, artist_id, for_sale) VALUES (?, ?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n conn.execute(insert_artwork, (artwork.artwork, artwork.price, artwork.artist_id, artwork.for_sale))\n # new_id = res.lastrowid # Get the ID of the new row in the table\n # artwork.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError as e:\n print(f'\\nError - Artwork with that name is already in the database.\\n', e)\n return False", "def get_artwork(request):\n\n\tartist = get_member(request.user)\n\tform = ArtworkForm(request.POST, request.FILES)\n\tart_type = request.POST.get('artwork_type', None)\n\tif form.is_valid():\n\t\tname = form.cleaned_data['name']\n\t\tprice = form.cleaned_data['price']\n\t\timage = form.cleaned_data['image']\n\n\treturn artist, name, art_type, price, image", "def _get_artist(self):\n artists = FileRecord.query(FileRecord.artist).distinct().filter(\n FileRecord.album == self.name).filter(\n FileRecord.year == self.year).all()\n\n if len(artists) > 1:\n return \"Various Artists\"\n elif len(artists) == 1:\n return artists[0][0]\n else:\n return \"(Unknown)\"", "def artwork_pregenerator(request, elements, kw):\n artwork = kw.pop('artwork')\n kw['id'] = artwork.id\n # n.b.: this won't hurt anything if the route doesn't have {title}, so it's\n # calculated and thrown away. 
bad?\n if artwork.title:\n kw['title'] = '-' + _make_url_friendly(artwork.title)\n else:\n kw['title'] = ''\n\n return elements, kw", "def get_artwork_modified(request, artwork):\n\tartist = get_member(request.user)\n\ttype = artwork.art_type\n\tdelete = False\n\tform = ArtworkForm(request.POST, request.FILES, initial={\"name\" : artwork.name,\\\n\t 'price' : artwork.price, 'image' : artwork.image})\n\tart_type = request.POST.get('artwork_type', None)\n\tif art_type!=type and artwork.state == 2:\n\t\tdelete_artwork_from_event(request, artwork)\n\t\tdelete = True\n\tif form.is_valid():\n\t\tname = form.cleaned_data['name']\n\t\tprice = form.cleaned_data['price']\n\t\timage = form.cleaned_data['image']\n\n\treturn artist, name, art_type, price, image, delete", "def save(image):\n keypoints, description = describe(image)\n artwork = {\n \"keypoints\": keypoints,\n \"description\": description,\n \"path\": image,\n \"date\": datetime.datetime.utcnow()\n }\n artwork_id = db.insert(artwork)\n print(artwork_id)", "def delete_artwork(artwork):\n artwork_db.delete_artwork(artwork)", "def test_artwork(self):\n # Create some art with a known tag\n user = sim.sim_user()\n artwork = sim.sim_artwork(user=user)\n tag = sim.sim_tag()\n artwork.tag_objs.append(tag)\n model.session.flush()\n\n # Ensure it shows in the tag's gallery\n res = self.app.get(self.url('tags.artwork', tag=tag))\n assert artwork.title in res", "def friendly_name(self):\n if not self.collection:\n print('YIKES no filepath %s %s' % (self.title,self.slug))\n print (self)\n return ''\n fpath = self.collection.filePath\n if '/' in fpath:\n bg = fpath.index('/')+1\n return fpath[bg:]+ '/' + self.fileName\n return fpath+'/'+self.fileName", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "async def art(self, ctx, *, art: str):\n try:\n art = self.get_entry('Art', art.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = art['Name']\n description = art['Description']\n weapon = art['Weapon']\n learned = art['Class/Rank']\n effect_range = art['Effect Range']\n attribute = art['Attribute']\n hits = art['Hits']\n scaling = art['Hit Scaling']\n cooldown = art['Cooldown']\n cooldown_bonus = art['Secondary/Tertiary']\n special_effects = art['Special Effects']\n aura_effects = art['Aura Effects']\n extra_effects = art['Extra Effects']\n additional_effects = art['Additional Effects']\n effects = art['Effects']\n aura = art['Aura']\n duration = art['Effect Duration']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://art.png')\n embed.add_field(name='Weapon', value=weapon)\n embed.add_field(name='Learned', value=learned)\n embed.add_field(name='Effect Range', value=effect_range)\n if attribute:\n embed.add_field(name='Attribute', value=attribute)\n if hits:\n embed.add_field(name='Hits', value=hits)\n if scaling:\n embed.add_field(name='Hit Scaling', value=scaling)\n embed.add_field(name='Cooldown', value=cooldown)\n embed.add_field(name='Cooldown Bonus', value=cooldown_bonus)\n if special_effects:\n embed.add_field(name='Special Effects', value=special_effects,\n inline=False)\n if aura_effects:\n embed.add_field(name='Aura Effects', value=aura_effects,\n inline=False)\n if extra_effects:\n embed.add_field(name='Extra Effects', value=extra_effects,\n inline=False)\n if additional_effects:\n embed.add_field(name='Additional Effects',\n value=additional_effects)\n if effects:\n embed.add_field(name='Effects', value=effects)\n if aura:\n 
embed.add_field(name='Aura', value=aura)\n if duration:\n embed.add_field(name='Duration', value=duration)\n embed.add_field(name='Description', value=description, inline=False)\n await ctx.send(file=discord.File(f'xenox/arts/{name}.png', 'art.png'),\n embed=embed)", "def display_artist_complete_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_all_artwork_from_one_artist(artist_name)\n for piece in results:\n print(piece)\n else:\n print('Sorry, no artwork from this artist to display ')", "def bb_artistname(hit):\n try:\n artist = hit.group(1)\n A = Artist.objects.get(handle=artist)\n T = loader.get_template('webview/t/artist.html')\n C = Context({'A' : A})\n return T.render(C)\n except:\n # This is normally thrown when the artist is invalid. Return the original result,\n # Only we add an icon to indicate an invalid artist.\n return '<img src=\"/static/user_error.png\" alt=\"artist\" border=\"0\" /> %s' % (artist)", "def ArtID(self, default=None):\n return self.data.get('artid', default)", "def ArtID(self, default=None):\n return self.data.get('artid', default)", "def display_all():\n results = artwork_db.get_all_artwork()\n for artist in results:\n print(artist)", "def LoadArtIntoDB(store,art):\n if 'srcorgname' in art and art['srcorgname'] is not None:\n srcorg = Misc.GetOrgID( art[ 'srcorgname' ] )\n else:\n # no publication specified - look up using domain name\n o = urlparse.urlparse(art['permalink'])\n domain = o[1].lower()\n srcorg = Publication.find_or_create(domain)\n art['srcorg'] = srcorg\n\n\n # resolve bylined authors to journo ids\n expected_journo = None\n authors = Byline.CrackByline(art['byline'])\n attributed = []\n for author in authors:\n attributed.append(Journo.find_or_create(author, art, expected_journo))\n art['journos'] = attributed\n\n# if opts.test:\n# ukmedia.PrettyDump( art )\n\n article_id = store.upsert( art )\n\n return article_id", "def display_artist_available_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_available_artwork_from_one_artist(artist_name)\n if results:\n for piece in results:\n print(piece)\n else:\n print('Sorry this artist does not have any available art at this time ')\n else:\n print('Sorry, no artwork from this artist to display ')", "def get_artwork(session_, uri: str):\n return session_.query(CoverArt).filter_by(uri=uri).first()", "def get_name():", "def getname(self, name, ext):\n basename = \"Cro%s.%s\" % (name, ext)\n for fn in os.listdir(self.dbdir):\n if basename.lower() == fn.lower():\n return os.path.join(self.dbdir, fn)", "def add_new_artwork():\n artist_name = get_artist_name()\n if not controls_utils.artist_already_in_db(artist_name):\n print('Artist not registered, creating new registration. 
')\n email = get_artist_email()\n new_artist = Artist(artist_name, email)\n artwork_db.add_artist(new_artist)\n artwork_name = get_new_artwork_name()\n price = get_price()\n available = True\n new_artwork = Artwork(artist_name, artwork_name, price, available)\n artwork_db.add_artwork(new_artwork)", "def scene_name():\n\n pass", "def find_artist_playlist(data):\n\n return data['artist'].lower() + '.m3u'", "def __getitem__(self, index):\n filename = self.content_dataset[index]\n image = Image.open(filename)\n content = self.content_transform(image)\n art_index = random.randint(0,self.art_num-1)\n filename,label = self.art_dataset[art_index]\n image = Image.open(filename)\n style = self.style_transform(image)\n return content,style,label", "def _get(self, thumbnail_name):\n raise NotImplementedError" ]
[ "0.6272623", "0.627238", "0.6110136", "0.6011075", "0.5951154", "0.59244585", "0.58771366", "0.5704116", "0.5623007", "0.55986863", "0.5502767", "0.5487516", "0.5482874", "0.5481453", "0.5475872", "0.5459583", "0.541917", "0.54038113", "0.54038113", "0.53657216", "0.535134", "0.5328685", "0.53234035", "0.53193694", "0.5295074", "0.52671707", "0.5262268", "0.525458", "0.5241915", "0.51985925" ]
0.7119328
0
checks if artist name is already registered and if not, registers them before adding new artwork
def add_new_artwork():
    artist_name = get_artist_name()
    if not controls_utils.artist_already_in_db(artist_name):
        print('Artist not registered, creating new registration. ')
        email = get_artist_email()
        new_artist = Artist(artist_name, email)
        artwork_db.add_artist(new_artist)
    artwork_name = get_new_artwork_name()
    price = get_price()
    available = True
    new_artwork = Artwork(artist_name, artwork_name, price, available)
    artwork_db.add_artwork(new_artwork)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_new_artist(artist_name):\n if controls_utils.artist_already_in_db(artist_name):\n print('This artist is already in database')\n\n else:\n artist_email = get_artist_email()\n new_artist = Artist(artist_name, artist_email)\n artwork_db.add_artist(new_artist)", "def addArtist(p_filename, p_artist):\n filecheck(p_filename)\n f_cleanXList = getArtists(p_filename)\n # print(\"addArtist() f_cleanXList\\t\\t\", f_cleanXList)\n if p_artist in f_cleanXList:\n raise DuplicateDataError(\"file already contains this artist\")\n f_cleanXList.insert(0, p_artist)\n setArtists(p_filename, f_cleanXList)\n return", "def _add_artist(self, artist):\n\n insert_artist = 'INSERT INTO artists (name, email, artist_id) VALUES (?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n res = conn.execute(insert_artist, (artist.name, artist.email, artist.artist_id))\n new_id = res.lastrowid # Get the ID of the new row in the table\n artist.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError:\n print(f'\\nError - Artist with that email is already in the database.\\n')\n return False", "def __add_artist(self, artist, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(artist['id']),\n\t\t\t'name' : artist['name'].lower(),\n\t\t\t'is_verified' : artist['is_verified'],\n\t\t\t'url' : artist['url'],\n\t\t\t'songs' : genius_api.get_artist_songs_id(artist['id'], artist_name=artist['name'])\n\t\t\t}\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.artists.insert_one(entry)", "def _add_artwork(self, artwork):\n\n insert_artwork = 'INSERT INTO artworks (artwork, price, artist_id, for_sale) VALUES (?, ?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n conn.execute(insert_artwork, (artwork.artwork, artwork.price, artwork.artist_id, artwork.for_sale))\n # new_id = res.lastrowid # Get the ID of the new row in the table\n # artwork.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError as e:\n print(f'\\nError - Artwork with that name is already in the database.\\n', e)\n return False", "def store_artists(self, request):\n if request.url not in self.set_artist:\n with open('xiami_artist_%s' % self.time_stamp, 'a') as output:\n output.write(request.url + '\\n')\n self.set_artist.add(request.url)\n return None", "def find_artist(self):\n item = self.clementine_artists.currentItem()\n if not item:\n return\n self.artist_buffer = item\n search = item.text(0)\n self._parent.current_data = search\n if self.artist_map[item.text(0)]:\n ok = qtw.QMessageBox.question(self, self.appname, 'Artist already has a '\n 'match - do you want to reassign?',\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.No)\n if ok == qtw.QMessageBox.No:\n return\n self.artist_map[item.text(0)] = ''\n try:\n found = self.lookup[search]\n except KeyError:\n test = search.split(None, 1)\n if len(test) == 1:\n found = False\n else:\n search = test[1]\n try:\n found = self.lookup[search]\n except KeyError:\n found = False\n if found:\n find = self.albums_artists.findItems(found, core.Qt.MatchFixedString, 2)\n artists = []\n results = []\n for a_item in find: # only keep unmatched artists\n if a_item.text(2) in self.artist_map.values():\n continue\n results.append(a_item)\n artists.append(build_artist_name(a_item.text(0), a_item.text(1)))\n a_item = None\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Artist', artists,\n editable=False)\n if ok:\n a_item = results[artists.index(selected)]\n 
self.update_item(a_item, item)\n return\n\n self.add_artist()", "def add_artist(self):\n item = self.artist_buffer\n artistname = item.text(0) if item else ''\n dlg = NewArtistDialog(self, artistname).exec_()\n if dlg != qtw.QDialog.Accepted:\n return\n fname, lname = self.data\n if not item:\n result = self.clementine_artists.findItems(' '.join((fname, lname)),\n core.Qt.MatchFixedString, 0)\n if result:\n item = result[0]\n if not item:\n qtw.QMessageBox.information(self, self.appname, \"Artist doesn't \"\n \"exist on the Clementine side\")\n return\n\n a_item = None\n results = self.albums_artists.findItems(lname, core.Qt.MatchFixedString, 1)\n data = [build_artist_name(x.text(0), x.text(1)) for x in results]\n if results:\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Artist', data,\n editable=False)\n if ok:\n a_item = results[data.index(selected)]\n if not a_item:\n self.max_artist += 1\n a_item = qtw.QTreeWidgetItem([fname, lname, str(self.max_artist)])\n self.albums_artists.addTopLevelItem(a_item)\n self.new_artists.append(a_item)\n self.update_item(a_item, item)", "def test_single_track_blank_artist(self):\n self.add_mp3(set_artist=True, artist='')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def test_single_track_no_artist(self):\n self.add_mp3(set_artist=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def add_artists(self, params):\n artists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for a in artists:\n if 'artist:' + a in n:\n names.append(n)\n\n self.add_playlist(names)", "def set_artists(audio: EasyID3, artists):\r\n audio['artist'] = artists\r\n audio.save()", "def test_two_tracks_various_artists(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_artist=True, artist='Artist 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Various', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Various')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def new_artist( self ):\n\n artist_name_dialog = LineEditDialog( \"Artist name:\",\n \"New Artist Input\" )\n result = artist_name_dialog.exec_()\n\n # nothing to do if we were told this was an accident.\n if result == LineEditDialog.CANCELLED:\n print( \"Cancelled\" )\n return\n\n new_artist = artist_name_dialog.artist_edit.text().strip()\n\n if len( new_artist ) == 0:\n print( \"No artist was supplied. 
Ignoring.\" )\n return\n\n # see if this artist already exists in the database.\n try:\n self.db.new_artist( new_artist )\n except NameError:\n # XXX: better way to convey this\n print( \"'{:s}' is already in the database.\".format( new_artist ) )\n return\n\n # identify the new artist's position within the database's list and\n # update model to match.\n artists_list = self.db.get_artists()\n new_artist_index = artists_list.index( new_artist )\n\n self.artistsModel.insertRow( new_artist_index )\n self.artistsModel.setData( self.artistsModel.index( new_artist_index ),\n new_artist )", "def get_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n if not controls_utils.artwork_name_is_unique(artwork_name):\n return artwork_name\n else:\n print('artwork not found')", "def set_album_artist(audio: EasyID3, album_artist):\r\n audio['albumartist'] = album_artist\r\n audio.save()", "def get_new_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n while not controls_utils.artwork_name_is_unique(artwork_name):\n print('Artwork name is taken')\n artwork_name = input('Please enter title of artwork: ')\n return artwork_name", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def set_artist_song_entry(self, artist, song):\n self.artist_name.set_text(artist)\n self.song_name.set_text(song)", "def createartist(self, rkdartistsdocs, summary):\n\n langs = [u'de', u'en', u'es', u'fr', u'nl']\n\n data = {'labels': {},\n 'aliases': {},\n }\n kunstenaarsnaam = rkdartistsdocs.get('virtualFields').get('hoofdTitel').get('kunstenaarsnaam')\n if kunstenaarsnaam.get('label') == u'Voorkeursnaam':\n for lang in langs:\n data['labels'][lang] = {'language': lang, 'value': kunstenaarsnaam.get('contents')}\n\n spellingsvarianten = rkdartistsdocs.get('virtualFields').get('naamsvarianten').get('contents').get('spellingsvarianten').get('contents')\n aliases = []\n for spellingsvariant in spellingsvarianten:\n name = spellingsvariant\n if u',' in name:\n (surname, sep, firstname) = name.partition(u',')\n name = u'%s %s' % (firstname.strip(), surname.strip(),)\n aliases.append(name)\n if aliases:\n for lang in langs:\n data['aliases'][lang]=[]\n for alias in aliases:\n data['aliases'][lang].append({'language': lang, 'value': alias})\n\n print data\n\n priref = rkdartistsdocs.get('priref')\n\n identification = {}\n pywikibot.output(summary)\n\n # No need for duplicate checking\n result = self.repo.editEntity(identification, data, summary=summary)\n artistTitle = result.get(u'entity').get('id')\n\n # Wikidata is sometimes lagging. 
Wait for 10 seconds before trying to actually use the item\n time.sleep(10)\n\n artistItem = pywikibot.ItemPage(self.repo, title=artistTitle)\n\n # Add to self.artworkIds so that we don't create dupes\n self.currentrkd[priref]=artistTitle\n\n # Add human\n humanitem = pywikibot.ItemPage(self.repo,u'Q5')\n instanceclaim = pywikibot.Claim(self.repo, u'P31')\n instanceclaim.setTarget(humanitem)\n artistItem.addClaim(instanceclaim)\n\n # Add the id to the item so we can get back to it later\n newclaim = pywikibot.Claim(self.repo, u'P650')\n newclaim.setTarget(unicode(priref))\n pywikibot.output('Adding new RKDartists ID claim to %s' % artistItem)\n artistItem.addClaim(newclaim)\n\n # Force an update so everything is available for the next step\n artistItem.get(force=True)\n\n return artistItem", "def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()", "def _get_artist(self):\n artists = FileRecord.query(FileRecord.artist).distinct().filter(\n FileRecord.album == self.name).filter(\n FileRecord.year == self.year).all()\n\n if len(artists) > 1:\n return \"Various Artists\"\n elif len(artists) == 1:\n return artists[0][0]\n else:\n return \"(Unknown)\"", "def test_artist(self):\n a = self.d.artist(1)\n self.assertEqual(a.name, 'Persuader, The')", "def query_artist(self, artist_name):\n # Check if the artist is in the database\n try:\n a = Artist.objects.get(searched_name = artist_name)\n return ArtistObject(searched_name=artist_name, db_model=a)\n\n # If the artist isn't in the database, search Spotify\n except Artist.DoesNotExist:\n print(\"Artist not found in database: \", artist_name)\n result = self.search_spotify(artist_name)\n\n # If there were any errors searching Spotify, don't continue\n if result is None:\n # SKIP FOR TEST NOW\n ArtistObject(searched_name=artist_name).save_to_db()\n return None\n else:\n artist = self.filter_spotify_result(result, artist_name)\n\n if artist is None:\n ArtistObject(searched_name=artist_name).save_to_db()\n return None\n\n top_tracks = self.find_top_tracks(artist.uri)\n artist.add_songs(top_tracks)\n\n # Save artist URI to the database for faster results next time\n print(\"About to save artist with name {} and object: \".format(artist_name), artist)\n artist.save_to_db()\n\n return artist", "def test_single_track_artist_too_long(self):\n self.add_mp3(set_artist=True, artist='z'*(App.max_artist_album_length+10))\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('is longer than', status)\n self.assertEqual(self.get_album_count(), 0)", "def artist_second_pass(self):\n log.debug(\"Called artist_second_pass for %s.\" % self.name)\n gen = consecutive_groups(self.name_clean)\n 
_min = 100\n cutoff = matching(self.name_clean)\n sp_artist_min, sp_artist_uri_min = None, None\n self.success = False\n\n for splitter in splitters:\n if splitter in self.name_clean:\n for sub in self.name_clean.split(splitter):\n yt_artist = sub.rstrip().lower()\n for i in gen:\n potential = \" \".join(i)\n results = self.sp.search(q='artist:' + potential, type='artist', limit=2)\n items = results['artists']['items']\n if len(items) > 0:\n artist = items[0]\n sp_artist = artist['name'].lower()\n sp_uri = artist['uri']\n lev = levenshtein(sp_artist, yt_artist)\n if _min > lev:\n sp_artist_min = sp_artist\n sp_artist_uri_min = sp_uri\n _min = lev\n\n if _min <= cutoff:\n log.debug(\"Method artist_second_pass succeeded for %s.\" % self.name)\n self.artist = sp_artist_min\n self.artist_uri = sp_artist_uri_min\n self.success = True\n else:\n log.debug(\"Method artist_second_pass failed for %s.\" % self.name)\n self.success = False", "def test_artist_name_exception(self):\n with mock.patch(\"spotlogin_api.get_top_call\", self.mock_key):\n result = spotify_login.get_top_artists(self.user[INPUT])\n self.assertEqual(result, [])", "def set_artist(self, artist: str) -> None:\n self.artist = artist\n # Rebuild the song's search query to include the artist defined.\n self.query_accuracy = 0\n self.__generate_search_query()", "def appendArtist(song):\n\tsql = []\n\t\n\tsql.append(\"INSERT INTO ARTIST ('name') VALUES ('\" \n\t+ '/'.join(song.artist) + \"');\")\n\t\n\tsql.append(\"INSERT INTO songs_artist ('songs_id', 'artist_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from artist where name = '\" + '/'.join(song.artist) + \"'));\")\n\t\n\treturn sql", "def test_adding_album_twice(self):\n self.add_mp3(filename='1.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep')\n self.assertEqual(added, False)\n self.assertIn('Would update to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)" ]
[ "0.7512231", "0.70247054", "0.6972556", "0.6826065", "0.6521868", "0.6520903", "0.6467225", "0.64457726", "0.63808656", "0.6248312", "0.6247259", "0.620168", "0.6199984", "0.6120154", "0.60596985", "0.6055652", "0.60216147", "0.59817195", "0.59527135", "0.5930472", "0.58660907", "0.5830208", "0.5808155", "0.5766556", "0.57047784", "0.5672525", "0.5636983", "0.56089807", "0.5602721", "0.56024396" ]
0.71755606
1
adds new artist to db if they are not already in
def add_new_artist(artist_name):
    if controls_utils.artist_already_in_db(artist_name):
        print('This artist is already in database')

    else:
        artist_email = get_artist_email()
        new_artist = Artist(artist_name, artist_email)
        artwork_db.add_artist(new_artist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_artist(self, artist):\n\n insert_artist = 'INSERT INTO artists (name, email, artist_id) VALUES (?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n res = conn.execute(insert_artist, (artist.name, artist.email, artist.artist_id))\n new_id = res.lastrowid # Get the ID of the new row in the table\n artist.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError:\n print(f'\\nError - Artist with that email is already in the database.\\n')\n return False", "def __add_artist(self, artist, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(artist['id']),\n\t\t\t'name' : artist['name'].lower(),\n\t\t\t'is_verified' : artist['is_verified'],\n\t\t\t'url' : artist['url'],\n\t\t\t'songs' : genius_api.get_artist_songs_id(artist['id'], artist_name=artist['name'])\n\t\t\t}\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.artists.insert_one(entry)", "def adds_new_artists_to_db_by_en_id(yt_playlist_query):\n # yt_playlist_query returned by gets_playlist_history(en_playlist), api_helper.py\n\n for item in yt_playlist_query:\n en_artist_id = item['en_artist_id']\n is_en_artist_id_in_db = db.session.query(exists().where(Artist.en_artist_id==en_artist_id)).scalar()\n if is_en_artist_id_in_db == False:\n artist_info = Artist(en_artist_id=en_artist_id,\n artist_name=item['artist_name'])\n db.session.add(artist_info)\n db.session.flush", "def add_new_artwork():\n artist_name = get_artist_name()\n if not controls_utils.artist_already_in_db(artist_name):\n print('Artist not registered, creating new registration. ')\n email = get_artist_email()\n new_artist = Artist(artist_name, email)\n artwork_db.add_artist(new_artist)\n artwork_name = get_new_artwork_name()\n price = get_price()\n available = True\n new_artwork = Artwork(artist_name, artwork_name, price, available)\n artwork_db.add_artwork(new_artwork)", "def addArtist(p_filename, p_artist):\n filecheck(p_filename)\n f_cleanXList = getArtists(p_filename)\n # print(\"addArtist() f_cleanXList\\t\\t\", f_cleanXList)\n if p_artist in f_cleanXList:\n raise DuplicateDataError(\"file already contains this artist\")\n f_cleanXList.insert(0, p_artist)\n setArtists(p_filename, f_cleanXList)\n return", "def appendArtist(song):\n\tsql = []\n\t\n\tsql.append(\"INSERT INTO ARTIST ('name') VALUES ('\" \n\t+ '/'.join(song.artist) + \"');\")\n\t\n\tsql.append(\"INSERT INTO songs_artist ('songs_id', 'artist_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from artist where name = '\" + '/'.join(song.artist) + \"'));\")\n\t\n\treturn sql", "def _add_artwork(self, artwork):\n\n insert_artwork = 'INSERT INTO artworks (artwork, price, artist_id, for_sale) VALUES (?, ?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n conn.execute(insert_artwork, (artwork.artwork, artwork.price, artwork.artist_id, artwork.for_sale))\n # new_id = res.lastrowid # Get the ID of the new row in the table\n # artwork.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError as e:\n print(f'\\nError - Artwork with that name is already in the database.\\n', e)\n return False", "def store_artists(self, request):\n if request.url not in self.set_artist:\n with open('xiami_artist_%s' % self.time_stamp, 'a') as output:\n output.write(request.url + '\\n')\n self.set_artist.add(request.url)\n return None", "def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n 
conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()", "def update_artist_set():\n conn = psycopg2.connect(\"dbname=artistqdb host=localhost user=postgres\")\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n cur.execute(\"\"\"insert into confirmed_artists (artist)\n select artist\n from scrobbles\n group by artist\n having count(distinct song) > 2\"\"\")\n # TODO: Figure out how to not insert duplicates (like, \"where not exists\")\n\n # Remove any duplicates\n cur.execute(\"\"\"delete from confirmed_artists as l\n using confirmed_artists as r\n where l.artist = r.artist\n and l.id > r.id\"\"\")", "def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def add_artist(self):\n item = self.artist_buffer\n artistname = item.text(0) if item else ''\n dlg = NewArtistDialog(self, artistname).exec_()\n if dlg != qtw.QDialog.Accepted:\n return\n fname, lname = self.data\n if not item:\n result = self.clementine_artists.findItems(' '.join((fname, lname)),\n core.Qt.MatchFixedString, 0)\n if result:\n item = result[0]\n if not item:\n qtw.QMessageBox.information(self, self.appname, \"Artist doesn't \"\n \"exist on the Clementine side\")\n return\n\n a_item = None\n results = self.albums_artists.findItems(lname, core.Qt.MatchFixedString, 1)\n data = [build_artist_name(x.text(0), x.text(1)) for x in results]\n if results:\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Artist', data,\n editable=False)\n if ok:\n a_item = results[data.index(selected)]\n if not a_item:\n self.max_artist += 1\n a_item = qtw.QTreeWidgetItem([fname, lname, str(self.max_artist)])\n self.albums_artists.addTopLevelItem(a_item)\n self.new_artists.append(a_item)\n self.update_item(a_item, item)", "def add_artist_to_db(artist_id, session):\n # type: (six.stringtypes, Any) -> None\n logger.info('adding artist {} to db'.format(artist_id))\n with musicbrainz_lock:\n artist_info = musicbrainzngs.get_artist_by_id(artist_id)['artist']\n\n artist = Artist(name=artist_info['name'],\n musicbrainz_id=artist_id,\n status=Status.Wanted)\n session.add(artist)\n\n release_groups = get_release_groups_for_artist(artist.musicbrainz_id)\n\n for group_info in release_groups:\n logger.debug('found {type} {name}'.format(type=group_info['type'], name=ensure_unicode(group_info['title'])))\n album = Album(title=ensure_unicode(group_info['title']),\n 
musicbrainz_id=group_info['id'],\n type=group_info['type'],\n artist=artist,\n status=Status.Wanted\n )\n\n session.add(album)\n\n releases = get_releases_for_release_group(album.musicbrainz_id)\n for release_info in releases:\n add_album_and_tracks_to_db(album, release_info, session)\n\n # Chose oldest release (it's usually the original release)\n chosen_release = session.query(Release).join(Album).filter(Album.musicbrainz_id == group_info['id']).order_by(\n Release.release_date.asc()).first()\n if chosen_release:\n chosen_release.is_selected = True\n\n with write_lock:\n session.commit()", "def new_artist( self ):\n\n artist_name_dialog = LineEditDialog( \"Artist name:\",\n \"New Artist Input\" )\n result = artist_name_dialog.exec_()\n\n # nothing to do if we were told this was an accident.\n if result == LineEditDialog.CANCELLED:\n print( \"Cancelled\" )\n return\n\n new_artist = artist_name_dialog.artist_edit.text().strip()\n\n if len( new_artist ) == 0:\n print( \"No artist was supplied. Ignoring.\" )\n return\n\n # see if this artist already exists in the database.\n try:\n self.db.new_artist( new_artist )\n except NameError:\n # XXX: better way to convey this\n print( \"'{:s}' is already in the database.\".format( new_artist ) )\n return\n\n # identify the new artist's position within the database's list and\n # update model to match.\n artists_list = self.db.get_artists()\n new_artist_index = artists_list.index( new_artist )\n\n self.artistsModel.insertRow( new_artist_index )\n self.artistsModel.setData( self.artistsModel.index( new_artist_index ),\n new_artist )", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def test_single_track_no_artist(self):\n self.add_mp3(set_artist=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def set_artists(audio: EasyID3, artists):\r\n audio['artist'] = artists\r\n audio.save()", "def test_single_track_blank_artist(self):\n self.add_mp3(set_artist=True, artist='')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def add_frequently_played_artists_to_queue():\n conn = psycopg2.connect(\"dbname=artistqdb host=localhost user=postgres\")\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n # Determine whether I like an artist enough to add them to my artist queue\n # Heads up, the rules used here are pretty arbitrary\n cur.execute(\"\"\"insert into artist_queue (artist, last_scrobble_date)\n SELECT artist,\n max(scrobble_date)\n from scrobbles\n where scrobble_date>now()-interval '30' day\n GROUP BY artist\n having (count(*) > 15\n and count(distinct song) > 10)\n or (count(distinct song) > 30)\"\"\")\n\n # Delete any older duplicates\n cur.execute(\"\"\"delete from artist_queue as l\n using artist_queue as r\n where l.artist = r.artist\n and l.id < r.id\"\"\")\n\n # Make the 
changes persistent in the database and end communications\n conn.commit()\n cur.close()\n conn.close()", "def test_two_tracks_various_artists(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_artist=True, artist='Artist 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Various', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Various')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def add_artists(self, params):\n artists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for a in artists:\n if 'artist:' + a in n:\n names.append(n)\n\n self.add_playlist(names)", "def create_artist_submission():\n\n # called upon submitting the new artist listing form\n # TODO: insert form data as a new Venue record in the db, instead\n # TODO: modify data to be the data object returned from db insertion\n\n try:\n name = request.form.get(\"name\")\n city = request.form.get(\"city\")\n state = request.form.get(\"state\")\n phone = request.form.get(\"phone\")\n imageLink = request.form.get(\"image_link\")\n genres = request.form.getlist(\"genres\")\n facebookLink = request.form.get(\"facebook_link\")\n website = request.form.get(\"website\")\n seeking_venue = request.form.get(\"seeking_venue\")\n seeking_description = request.form.get(\"seeking_description\")\n \n artist_to_add = Artist(\n name=name,\n city=city,\n state=state,\n phone=phone,\n image_link=imageLink,\n genres=genres,\n facebook_link=facebookLink,\n website=website,\n seeking_venue=seeking_venue,\n seeking_description=seeking_description,\n )\n\n db.session.add(artist_to_add)\n db.session.commit()\n\n # on successful db insert, flash success\n flash(\"Artist \" + request.form[\"name\"] + \" was successfully listed!\")\n\n except:\n flash(\"An error occurred. 
Artist \" + name + \" could not be listed.\")\n db.session.rollback()\n finally:\n db.session.close()\n\n return render_template(\"pages/home.html\")", "def __insert_artist_data(cur, df):\n artist_data = (\n df.artist_id.values[0],\n df.artist_name.values[0],\n df.artist_location.values[0],\n (df.artist_latitude.values[0]).item(),\n (df.artist_longitude.values[0]).item()\n )\n cur.execute(artist_table_insert, artist_data)", "def query_artist(self, artist_name):\n # Check if the artist is in the database\n try:\n a = Artist.objects.get(searched_name = artist_name)\n return ArtistObject(searched_name=artist_name, db_model=a)\n\n # If the artist isn't in the database, search Spotify\n except Artist.DoesNotExist:\n print(\"Artist not found in database: \", artist_name)\n result = self.search_spotify(artist_name)\n\n # If there were any errors searching Spotify, don't continue\n if result is None:\n # SKIP FOR TEST NOW\n ArtistObject(searched_name=artist_name).save_to_db()\n return None\n else:\n artist = self.filter_spotify_result(result, artist_name)\n\n if artist is None:\n ArtistObject(searched_name=artist_name).save_to_db()\n return None\n\n top_tracks = self.find_top_tracks(artist.uri)\n artist.add_songs(top_tracks)\n\n # Save artist URI to the database for faster results next time\n print(\"About to save artist with name {} and object: \".format(artist_name), artist)\n artist.save_to_db()\n\n return artist", "def adds_new_songs_to_db_by_en_id(yt_playlist_query):\n # yt_playlist_query returned by gets_playlist_history(en_playlist), api_helper.py\n\n for item in yt_playlist_query:\n en_song_id = item['en_song_id']\n is_en_song_id_in_db = db.session.query(exists().where(Song.en_song_id == en_song_id)).scalar()\n if is_en_song_id_in_db == False:\n en_artist_id = item['en_artist_id']\n artist_id = db.session.query(Artist.artist_id).filter(Artist.en_artist_id == en_artist_id).one()\n song_info = Song(en_song_id=en_song_id,\n song_title=item['song_title'],\n artist_id=artist_id)\n db.session.add(song_info)\n db.session.flush", "def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)", "def loadArtists(catalog):\n tagsfile = cf.data_dir + 'MoMA/Artists-utf8-small.csv'\n input_file = csv.DictReader(open(tagsfile, encoding='utf-8'))\n for artist in input_file:\n model.addArtist(catalog, artist)", "def loadArtists(catalog):\n artistsfile = cf.data_dir + 
'MoMA/Artists-utf8-10pct.csv'\n input_file = csv.DictReader(open(artistsfile, encoding='utf-8'))\n for artist in input_file:\n model.addArtist(catalog, artist)", "def test_adding_album_twice_forced(self):\n self.add_mp3(filename='1.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep', force_update=True)\n self.assertEqual(added, True)\n self.assertIn('Updated to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'ep')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "async def add_img(self, ctx: BBContext, url: str, artist: Optional[discord.User] = None):\n\n art = Art(url, artist.id, artist.name) if artist else Art(url)\n con = await ctx.get_connection()\n query = f'INSERT INTO {TABLE_ARTS}(url, artist_id, artist_name) VALUES($1, $2, $3)'\n\n await con.execute(query, art.url, art.artist_id, art.artist_name)\n await ctx.tick(True)" ]
[ "0.8128571", "0.7432734", "0.72047126", "0.7142562", "0.7089367", "0.6940522", "0.6874244", "0.687114", "0.68703157", "0.6851918", "0.6840053", "0.67764634", "0.67681867", "0.6488752", "0.6475834", "0.6470941", "0.6462443", "0.64317393", "0.62935674", "0.62439895", "0.62268573", "0.61097", "0.60714495", "0.60532784", "0.60518754", "0.60471106", "0.60363364", "0.5909237", "0.5891066", "0.5886774" ]
0.77281874
1
checks if artist is in db, if so displays their works that are unsold
def display_artist_available_portfolio(artist_name):
    if controls_utils.artist_has_work_in_db(artist_name):
        results = artwork_db.get_available_artwork_from_one_artist(artist_name)
        if results:
            for piece in results:
                print(piece)
        else:
            print('Sorry this artist does not have any available art at this time ')
    else:
        print('Sorry, no artwork from this artist to display ')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_artist_complete_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_all_artwork_from_one_artist(artist_name)\n for piece in results:\n print(piece)\n else:\n print('Sorry, no artwork from this artist to display ')", "def _get_artist(self):\n artists = FileRecord.query(FileRecord.artist).distinct().filter(\n FileRecord.album == self.name).filter(\n FileRecord.year == self.year).all()\n\n if len(artists) > 1:\n return \"Various Artists\"\n elif len(artists) == 1:\n return artists[0][0]\n else:\n return \"(Unknown)\"", "def display_all():\n results = artwork_db.get_all_artwork()\n for artist in results:\n print(artist)", "def show_artist(artist_id):\n\n result = db.session.query(Artist).filter(Artist.id == artist_id)\n result = result[0]\n\n past_shows_count = 0\n upcoming_shows_count = 0\n\n past_shows = []\n upcoming_shows = []\n\n all_shows = Shows.query.all()\n\n print(all_shows)\n\n for show in all_shows:\n if show.artist_id == result.id:\n show_time = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n if show_time > datetime.now() :\n upcoming_shows.append(show)\n else: \n past_shows.append(show)\n \n past_shows_count = len(past_shows)\n upcoming_shows_count = len(upcoming_shows)\n\n resdata = {\n \"id\": result.id,\n \"name\": result.name,\n \"genres\": json.loads(result.genres),\n \"city\": result.city,\n \"state\": result.state,\n \"phone\": result.phone,\n \"website\": result.website,\n \"facebook_link\": result.facebook_link,\n \"seeking_venue\": result.seeking_venue,\n \"seeking_description\": result.seeking_description,\n \"image_link\": result.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": past_shows_count,\n \"upcoming_shows_count\": upcoming_shows_count,\n }\n\n data = list(filter(lambda d: d[\"id\"] == artist_id, [resdata]))[0]\n return render_template(\"pages/show_artist.html\", artist=data)", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def featured(request):\n\n artist = Artist.objects.all()\n\n \n\n context = {\n 'artist': artist,\n }\n\n return render(request, 'artist/featured.html', context)", "def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) 
AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()", "def get_artist_tracks():\n fh = open(\"data.csv\")\n info = csv.DictReader(fh)\n #prompt user for an artist name\n user_input = input(\"type an artist: \")\n songs = []\n for entries in info:\n if user_input in entries['Artist'].lower():\n song_entry = entries['Track Name']\n if song_entry not in songs:\n # add songs that aren't in the song list so you only get unique values\n songs.append(song_entry)\n #print the list of songs from an artist\n print(songs)", "def procesArtist(self, rkdartistsdocs):\n summary = u''\n if rkdartistsdocs.get('priref') in self.currentrkd:\n pywikibot.output(u'Already got %s on %s' % (rkdartistsdocs.get('priref'),\n self.currentrkd.get(rkdartistsdocs.get('priref'))))\n return False\n print rkdartistsdocs.get('priref')\n number = -1\n if rkdartistsdocs.get(u'results_in_other_databases') and \\\n rkdartistsdocs.get(u'results_in_other_databases').get(u'images_kunstenaar').get('count'):\n number = rkdartistsdocs.get(u'results_in_other_databases').get(u'images_kunstenaar').get('count')\n if number > 25:\n summary = u'Creating artist based on RKD: Artist has more than 25 works in RKDimages'\n return self.createartist(rkdartistsdocs, summary)\n # Focus on painters here\n if 'schilder' in rkdartistsdocs.get('kwalificatie'):\n if 'Koninklijke subsidie voor vrije schilderkunst' in rkdartistsdocs.get('winnaar_van_prijs'):\n summary = u'Creating artist based on RKD: Painter won the Royal Prize for Painting'\n return self.createartist(rkdartistsdocs, summary)\n # Could add more prizes here\n if number > 5:\n summary = u'Creating artist based on RKD: Painter has more than 5 works in RKDimages'\n return self.createartist(rkdartistsdocs, summary)\n if number > 0 and rkdartistsdocs.get('geboortedatum_begin') and rkdartistsdocs.get('geboorteplaats'):\n summary = u'Creating artist based on RKD: Painter with works in RKDimages and date and place of birth known'\n return self.createartist(rkdartistsdocs, summary)\n #summary = u'Stresstest'\n #return self.createartist(rkdartistsdocs, summary)\n # Create remaining people who won something\n if rkdartistsdocs.get('winnaar_van_prijs') and len(rkdartistsdocs.get('winnaar_van_prijs')) > 0:\n if len(rkdartistsdocs.get('winnaar_van_prijs'))==1:\n summary = u'Creating artist based on RKD: Person won the prize \"%s\"' % (rkdartistsdocs.get('winnaar_van_prijs')[0],)\n elif len(rkdartistsdocs.get('winnaar_van_prijs'))==2:\n summary = u'Creating artist based on RKD: Person won the prizes \"%s\" & \"%s\"' % (rkdartistsdocs.get('winnaar_van_prijs')[0],\n rkdartistsdocs.get('winnaar_van_prijs')[1],)\n else:\n summary = u'Creating artist based on RKD: Person won %s prizes including \"%s\" & \"%s\"' % (len(rkdartistsdocs.get('winnaar_van_prijs')),\n rkdartistsdocs.get('winnaar_van_prijs')[0],\n rkdartistsdocs.get('winnaar_van_prijs')[1],)\n return self.createartist(rkdartistsdocs, summary)\n return None", "def mark_as_sold(artwork):\n artwork_db.update_artwork(artwork)\n print(artwork + ' has been marked as sold. ')", "def _update_availability(self, artwork):\n\n if not artwork.artist_id:\n raise ArtworkError('Book does not have ID, can\\'t update')\n\n query_update_availability = 'UPDATE artworks SET for_sale = ? 
WHERE artwork = ?'\n\n with sqlite3.connect(db_path) as conn:\n updated = conn.execute(query_update_availability, (artwork.for_sale,))\n rows_modified = updated.rowcount\n conn.close()\n\n if rows_modified == 0:\n raise ArtworkError(f'Artwork with name {artwork} not found.')", "def all_artwork(request):\n all_unsold = Artwork.objects.filter(status=Artwork.FOR_SALE).order_by('-likes')\n all_sold = Artwork.objects.filter(status=Artwork.SOLD)\n\n \"\"\" Creating the likes dictionary \"\"\"\n likes_dictionary = {}\n if request.user:\n likes = Like.objects.all()\n for like in likes:\n likes_dictionary[like.artwork_id] = True\n return render(request, 'picture/all_artwork.html',\n {'artwork_sale': all_unsold, 'artwork_sold': all_sold,\n 'likes': likes_dictionary})", "async def artists(self, ctx: BBContext):\n\n query = \"\"\"SELECT DISTINCT artist_name, COUNT(*)\n FROM extras.arts\n WHERE artist_name IS NOT NULL\n GROUP BY artist_name\n ORDER BY COUNT(*) DESC\"\"\"\n\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n view = ArtsLeaderboardPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)", "def getArtistsofArtwork(catalog, codes):\n return model.getArtistname(catalog,codes)", "def search_artists(self, entropy, bits):\n replace_artist = []\n offset_list = list(range(0, bits-50, 50)) + [bits-50]\n new_offset = offset_list[len(offset_list)-1]\n \n for offset in offset_list:\n artists = self.spotify.search(q='a', type='artist', offset=offset, limit=50)\n for artist in progressbar.progressbar(artists['artists']['items']):\n if artist['name'] not in self.artist_list and self.search_tracks(artist['name'], entropy):\n self.artist_list = artist['name']\n self.artist_table = [artist['name']]\n elif artist['name'] not in self.artist_list and artist['name'] not in replace_artist:\n replace_artist.append(artist['name'])\n size = len(self.artist_list) + len(replace_artist)\n\n if size < bits:\n for i in range(bits-size):\n replace_artist.append('Padding')\n \n while len(replace_artist) > 0:\n new_offset = new_offset + len(replace_artist)\n artists = self.spotify.search(q='a', type='artist', offset=new_offset, limit=50)\n for artist in artists['artists']['items']:\n if artist['name'] not in self.artist_list and self.search_tracks(artist['name'], entropy):\n del replace_artist[0]\n self.artist_list = artist['name']\n self.artist_table = [artist['name']]\n elif artist['name'] not in self.artist_list and artist['name'] not in replace_artist: \n del replace_artist[0]\n replace_artist.append(artist['name'])", "def artists():\n # TODO: replace with real data returned from querying the database (DONE)\n artists = Artist.query.group_by(Artist.id, Artist.name).all()\n\n data = []\n\n for a in artists :\n data.append({\n 'id' : a.id,\n 'name' : a.name\n })\n\n return render_template(\"pages/artists.html\", artists=data)", "def find_artist(self):\n item = self.clementine_artists.currentItem()\n if not item:\n return\n self.artist_buffer = item\n search = item.text(0)\n self._parent.current_data = search\n if self.artist_map[item.text(0)]:\n ok = qtw.QMessageBox.question(self, self.appname, 'Artist already has a '\n 'match - do you want to reassign?',\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.No)\n if ok == qtw.QMessageBox.No:\n return\n self.artist_map[item.text(0)] = ''\n try:\n found = self.lookup[search]\n except KeyError:\n test = 
search.split(None, 1)\n if len(test) == 1:\n found = False\n else:\n search = test[1]\n try:\n found = self.lookup[search]\n except KeyError:\n found = False\n if found:\n find = self.albums_artists.findItems(found, core.Qt.MatchFixedString, 2)\n artists = []\n results = []\n for a_item in find: # only keep unmatched artists\n if a_item.text(2) in self.artist_map.values():\n continue\n results.append(a_item)\n artists.append(build_artist_name(a_item.text(0), a_item.text(1)))\n a_item = None\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Artist', artists,\n editable=False)\n if ok:\n a_item = results[artists.index(selected)]\n self.update_item(a_item, item)\n return\n\n self.add_artist()", "def artist():\n\tif not request.vars.id:\n\t\tredirect(URL('index'))\n\tid = request.vars.id\n\tartistname = db.executesql(\"select m1.name from artist_name as m1, artist as m2 where m1.id = m2.name and m2.id = \"+id+\";\")\n\turls = db.executesql(\"select distinct(m2.url) from l_artist_url m1, url m2 where m2.id = m1.entity1 and m1.entity0 = \"+id+\";\")\n\tdiscography = db.executesql(\"select m4.name,m5.name,m3.id,m6.count from artist_credit_name m1, artist_credit m2,release_group m3,release_name m4, release_group_primary_type m5,rel_group_count m6 where m4.id = m3.name and m3.artist_credit = m2.id and m2.id = m1.artist_credit and m5.id = m3.type and m6.id = m3.id and m1.artist = \"+id+\";\")\n\tlinks = []\n\twiki = \"\"\n\tfor url in urls:\n\t\tif \"wikipedia\" in url[0]:\n\t\t\twiki = url[0]\n\t\telse:\n\t\t\tlinks.append(url[0])\n\treturn dict(discography=discography, wiki=wiki, links=links, artistname=artistname)", "def test_artist_name_exception(self):\n with mock.patch(\"spotlogin_api.get_top_call\", self.mock_key):\n result = spotify_login.get_top_artists(self.user[INPUT])\n self.assertEqual(result, [])", "def process_artist(self, artist):\n\t\t# stripping featured artists. 
Most lyric sites have lyrics listed under main artist\n\t\tif \"Featuring\" in artist:\n\t\t\tartist = artist[:artist.index(\"Featuring\")]\n\t\t# looking for '&' to reduce artist name to singular from composite\n\t\tif \" & \" in artist:\n\t\t\tartist = artist[:artist.index(\" & \")]\n\t\t# collab sign seems to be A x B\n\t\tif \" x \" in artist:\n\t\t\tartist = artist[:artist.index(\" x \")]\n\t\treturn artist", "def query_artist(self, artist_name):\n # Check if the artist is in the database\n try:\n a = Artist.objects.get(searched_name = artist_name)\n return ArtistObject(searched_name=artist_name, db_model=a)\n\n # If the artist isn't in the database, search Spotify\n except Artist.DoesNotExist:\n print(\"Artist not found in database: \", artist_name)\n result = self.search_spotify(artist_name)\n\n # If there were any errors searching Spotify, don't continue\n if result is None:\n # SKIP FOR TEST NOW\n ArtistObject(searched_name=artist_name).save_to_db()\n return None\n else:\n artist = self.filter_spotify_result(result, artist_name)\n\n if artist is None:\n ArtistObject(searched_name=artist_name).save_to_db()\n return None\n\n top_tracks = self.find_top_tracks(artist.uri)\n artist.add_songs(top_tracks)\n\n # Save artist URI to the database for faster results next time\n print(\"About to save artist with name {} and object: \".format(artist_name), artist)\n artist.save_to_db()\n\n return artist", "def update_artist_set():\n conn = psycopg2.connect(\"dbname=artistqdb host=localhost user=postgres\")\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n cur.execute(\"\"\"insert into confirmed_artists (artist)\n select artist\n from scrobbles\n group by artist\n having count(distinct song) > 2\"\"\")\n # TODO: Figure out how to not insert duplicates (like, \"where not exists\")\n\n # Remove any duplicates\n cur.execute(\"\"\"delete from confirmed_artists as l\n using confirmed_artists as r\n where l.artist = r.artist\n and l.id > r.id\"\"\")", "def create_checkfile(artists):\n with open('./data/checkfile.txt', 'w') as checkfile:\n for new_artist in artists:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n print('{0.name}\\t{1.name}\\t{1.year}\\t{2.title}'.format(new_artist, new_album, new_song), file=checkfile)", "def artist():\n\n if request.method == \"GET\":\n return render_template(\"/artist.html\")\n\n else:\n # initialise the variables from the hidden html form input\n type = request.form.get(\"type\")\n url = request.form.get(\"url\")\n thumb = request.form.get(\"thumb\")\n\n # Authorization header to be embedded into the url \n headers = {\n 'Authorization': 'Discogs token=mqjXUBBzjnqrjUkKFIrOPAmlEZsGoDXjkRZgnRIR'\n }\n\n # search the database for artist information\n artists = requests.get(\"%s\" % url, headers=headers)\n artist = artists.json()\n\n # set variable if user is selecting pagination\n goto = request.form.get(\"goto\")\n\n if goto == None:\n\n # search the database for artists releases\n releases = requests.get(\"%s/releases?per_page=50\" % url, headers=headers)\n release = releases.json()\n\n # retreiving useful data\n data = release[\"releases\"]\n pagination = release[\"pagination\"]\n pages = pagination[\"pages\"]\n page = pagination[\"page\"]\n \n\n return render_template(\"/artist.html\",artist=artist, data=data, artistThumb=thumb, page=page, pages=pages, pagination=pagination, type=type, url=url, thumb=thumb)\n\n else:\n\n # search the database for artists releases goto page\n releases = requests.get(\"%s\" % goto, 
headers=headers)\n release = releases.json()\n\n # retreiving useful data\n data = release[\"releases\"]\n pagination = release[\"pagination\"]\n pages = pagination[\"pages\"]\n page = pagination[\"page\"]\n \n\n return render_template(\"/artist.html\",artist=artist, data=data, artistThumb=thumb, page=page, pages=pages, pagination=pagination, type=type, url=url, thumb=thumb)", "def search_artists():\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive. (DONE)\n\n response = request.form.get('search_term', '')\n response = response.lower()\n\n artists = db.session.query(Artist).filter(Artist.name.ilike('%' + response + '%')).all()\n results = []\n \n for a in artists:\n print(a.name)\n results.append({\n 'id': a.id,\n 'name' : a.name\n })\n\n response={\n \"count\": len(results),\n \"data\": results\n }\n\n return render_template(\n \"pages/search_artists.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )", "def test_single_track_no_artist(self):\n self.add_mp3(set_artist=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def get_artist_info(self, artist):\n self.clear()\n ai = self._artist_info(artist)\n if ai != 0:\n return ai\n else:\n if 'the ' in artist:\n ai = self._artist_info(artist.replace('the ', ''))\n else:\n ai = self._artist_info('the ' + artist)\n if ai == 0:\n if 'junior' in artist:\n ai = self._artist_info(artist.replace('junior', 'jr.'))\n if ai == 0:\n if '-' in artist:\n ai = self._artist_info(artist.replace('-', ' '))\n return ai", "def get_artists_from_songs(self):\n\t\tsongs = self.db.songs.find({ \"featured_artists\": { \"$exists\": \"true\" }})\n\t\tartists_from_songs = self.get_primary_artists_from_songs(songs)\n\t\tfor _ , song in enumerate(songs):\n\t\t\tfor artist in song[\"featured_artists\"]:\n\t\t\t\tif artist['id'] not in artists_from_songs:\n\t\t\t\t\tprint(artist['name'])\n\t\t\t\t\tartists_from_songs.extend(artist['id'])\n\t\treturn artists_from_songs", "def test_artist(self):\n a = self.d.artist(1)\n self.assertEqual(a.name, 'Persuader, The')", "def change_availability():\n artwork_sold = get_artwork_name()\n if not controls_utils.artwork_exists(artwork_sold):\n print('No record of that piece of art. ')\n else:\n artist = controls_utils.name_of_artist(artwork_sold)\n if not controls_utils.artwork_available(artwork_sold, artist):\n print('Sorry that piece has already been sold. ')\n else:\n response = input('Mark ' + artwork_sold + ' as sold? Y or N ')\n if response.upper() == 'Y':\n mark_as_sold(artwork_sold)\n while not controls_utils.response_affirmative(response):\n response = input('Are you sure you want to mark '\n + artwork_sold + ' by ' + artist + ' as sold? Y or N or press X to escape ')\n if response.upper() == 'X':\n break\n elif response.upper() == 'N':\n break" ]
[ "0.629018", "0.6007671", "0.5936472", "0.58785075", "0.5646522", "0.54772884", "0.5399925", "0.5379606", "0.53653336", "0.5363873", "0.53454787", "0.5341607", "0.53369594", "0.53170526", "0.531617", "0.5293473", "0.5213106", "0.5198957", "0.51982343", "0.51897067", "0.51827306", "0.5145342", "0.51162225", "0.5115878", "0.5115216", "0.50919104", "0.5077367", "0.50659394", "0.50506234", "0.50448585" ]
0.6492745
0
gets name of artwork to delete and checks to make sure before deleting
def get_artwork_to_delete():
    artwork = input('Which artwork would you like to delete? ')
    while not controls_utils.artwork_exists(artwork):
        artwork = input('Which artwork would you like to delete? ')
    artist = controls_utils.name_of_artist(artwork)
    response = input('Are you sure you want to delete ' + artwork + ' by ' + artist + ' ? Y or N')
    if response.upper() == 'Y':
        delete_artwork(artwork)
    while not controls_utils.response_affirmative(response):
        response = input('Are you sure you want to delete ' + artwork + ' by ' + artist + ' ? Y or N or press X to escape ')
        if response.upper() == 'X':
            break
        elif response.upper() == 'N':
            break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_artwork(artwork):\n artwork_db.delete_artwork(artwork)", "def delete_thumbnail(self, thumbnail_name):", "def get_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n if not controls_utils.artwork_name_is_unique(artwork_name):\n return artwork_name\n else:\n print('artwork not found')", "def delete_image(Name=None):\n pass", "def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return", "def delete(self, *args, **kwargs):\n\t\tself.emo_img.delete(False)\n\t\tsuper(Emotion, self).delete(*args, **kwargs)", "def delete_entry(title):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)", "def get_new_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n while not controls_utils.artwork_name_is_unique(artwork_name):\n print('Artwork name is taken')\n artwork_name = input('Please enter title of artwork: ')\n return artwork_name", "def del_image(self, name):\r\n if self.images is None or name not in self.images:\r\n return\r\n l = self.images\r\n self.images = None\r\n l.setdefault('/empties/', [])\r\n # push the number on the empties list\r\n l['/empties/'].append(l[name])\r\n del l[name]\r\n self.images = l", "def get_available_name(self, name, max_length=None):\n # If the filename already exists, remove it as if it was a true file system\n if self.exists(name):\n os.remove(os.path.join(settings.MEDIA_ROOT, name))\n return name", "def delete(self, *args, **kwargs):\n self.image.delete()\n super(Recipe, self).delete(*args, **kwargs)", "def test_delete_image(self):\n pass", "def delete(self, *args, **kwargs):\n\n user_n=str(self.sujeto.user.pk)\n img_name=str(self.sujeto.pk)\n \n file_path=settings.MEDIA_ROOT+self.path[len('/media'):]\n\n os.remove(file_path)\n super(img_to_show, self).delete(*args, **kwargs)", "def delete_song(self):\r\n song_id = tuple(input(\"Give the melody id to be deleted:\\t\"))\r\n sql = \"SELECT file_title, form FROM songs WHERE id = %s\" # Check existence of song with given ID\r\n self.cursor.execute(sql, song_id)\r\n result = self.cursor.fetchall()\r\n if len(result) > 0:\r\n path = self.p_storage + \"/\" + result[0][0] + \".\" + result[0][\r\n 1] # Find path of song by appending the name and format to the storage directory path\r\n os.remove(path) # Remove song from directory\r\n sql = \"DELETE FROM songs WHERE id = %s\" # Delete song from database\r\n self.cursor.execute(sql, song_id)\r\n self.cnx.commit()\r\n print(self.cursor.rowcount, \"record(s) deleted\")\r\n else:\r\n print(\"Give a valid id...\")", "def delete_image_builder(Name=None):\n pass", "def test_delete_collection_image(self):\n pass", "def auto_delete_image_lecture_on_delete(sender, instance, **kwargs):\n if instance.file:\n instance.file.delete(save=False)", "def test_thumbnail(self):\n pub = PublicationFactory(thumbnail__filename=\"tester.jpg\")\n self.assertEqual(\n pub.thumbnail.url, f\"/media/reading/publications/{pub.slug}/tester.jpg\"\n )\n self.assertTrue(\n pub.thumbnail.path.endswith, f\"/reading/publications/{pub.slug}/tester.jpg\"\n )\n\n # Tidy up:\n pub.thumbnail.delete()", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def delete_meal():", "def __on_delete(self):\n self.image.delete()", "def __on_delete(self):\n self.image.delete()", "def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n 
delete(kwargs['file'])", "def get_available_name(self, name, max_length):\n # If the filename already exists, remove it as if it was a true file system\n if self.exists(name):\n os.remove(os.path.join(settings.MEDIA_ROOT, name))\n return super(OverwriteStorage, self).get_available_name(name, max_length)", "def delete(self, name):\n path = self.directory / f\"{name}.yaml\"\n if path.exists():\n path.unlink()", "def delete():", "def test_before_delete(self, create_with_upload):\n name = \"test.txt\"\n resource = create_with_upload(\n \"hello world\",\n name,\n name=name,\n package_id=factories.Dataset()[\"id\"],\n )\n plugin = p.get_plugin(\"cloudstorage\")\n uploader = plugin.get_resource_uploader(resource)\n assert uploader.get_url_from_filename(resource[\"id\"], name)\n\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n assert uploader.get_url_from_filename(resource[\"id\"], name) is None", "def delete_playlist(self, playlist_name):\n print(\"deletes_playlist needs implementation\")", "def remove(name):", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)" ]
[ "0.74340343", "0.6875369", "0.6488465", "0.6230092", "0.61193043", "0.60223025", "0.600303", "0.59973776", "0.5964071", "0.5895225", "0.5831127", "0.58218324", "0.5801476", "0.5795376", "0.5749312", "0.57488483", "0.5737199", "0.5734383", "0.5707277", "0.5698352", "0.5694252", "0.5694252", "0.5692533", "0.5687944", "0.56840926", "0.5675754", "0.56385756", "0.5628324", "0.56125754", "0.55986416" ]
0.72529954
1
after confirming the artwork exists and is not already sold, changes status to sold
def change_availability():
    artwork_sold = get_artwork_name()
    if not controls_utils.artwork_exists(artwork_sold):
        print('No record of that piece of art. ')
    else:
        artist = controls_utils.name_of_artist(artwork_sold)
        if not controls_utils.artwork_available(artwork_sold, artist):
            print('Sorry that piece has already been sold. ')
        else:
            response = input('Mark ' + artwork_sold + ' as sold? Y or N ')
            if response.upper() == 'Y':
                mark_as_sold(artwork_sold)
            while not controls_utils.response_affirmative(response):
                response = input('Are you sure you want to mark ' + artwork_sold + ' by ' + artist + ' as sold? Y or N or press X to escape ')
                if response.upper() == 'X':
                    break
                elif response.upper() == 'N':
                    break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_as_sold(artwork):\n artwork_db.update_artwork(artwork)\n print(artwork + ' has been marked as sold. ')", "def _update_availability(self, artwork):\n\n if not artwork.artist_id:\n raise ArtworkError('Book does not have ID, can\\'t update')\n\n query_update_availability = 'UPDATE artworks SET for_sale = ? WHERE artwork = ?'\n\n with sqlite3.connect(db_path) as conn:\n updated = conn.execute(query_update_availability, (artwork.for_sale,))\n rows_modified = updated.rowcount\n conn.close()\n\n if rows_modified == 0:\n raise ArtworkError(f'Artwork with name {artwork} not found.')", "def sell(self):\n self.status = \"sold\"\n return self", "def buy_artwork(request):\n follow_on_page = \"picture/all_artwork.html\"\n if \"follow\" in request.GET:\n follow_on_page = request.GET.get('follow')\n artwork = Artwork.objects.get(id=request.GET.get('id'))\n artwork.status = 'sold'\n artwork.save()\n return HttpResponseRedirect(follow_on_page)", "def confirm_inventory(self, data, batch): # not used will be deprecated todo\n try:\n batch = batch\n data = data\n location = self.Location.find(['name', '=', 'MyInventory'])[-1]\n inventory = self.Inventory.find([('batch_number', '=', batch), ('location', '=', location.id)])[-1]\n lines = inventory.lines\n for i in data:\n product = \\\n self.Product.find(\n [('code', '=', i['code']), ('description', '=', 'Stock'), ('type', '=', 'goods')])[\n -1]\n supplier = self.Party.find(['name', '=', i['supplier']])[-1]\n for j in lines:\n if j.product == product:\n pro = j.product\n template = pro.template\n template.list_price = Decimal(i['rate'])\n template.save()\n pro.save()\n j.quantity = float(i['quantity'])\n j.supplier = supplier\n j.expiry_date = i['expiry_date']\n j.save()\n inventory.state = 'done'\n inventory.save()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def auto_update_stock(self, ctx):\n woo_instance_id = ctx.get('woo_instance_id', False)\n instance = self.woo_instance_id.browse(woo_instance_id)\n if not instance:\n return True\n self.update_stock(instance, instance.last_inventory_update_time)\n return True", "def restock_book(self, isbn, quantity):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM book WHERE ISBN=%s\"\"\", (isbn,))\n if self.cursor.fetchone()[0]:\n self.cursor.execute(\"\"\"UPDATE book set stock=stock+%s WHERE ISBN=%s\"\"\", (quantity, isbn))\n self.db.commit()\n return True\n return False", "def check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def Trading(Seller,Buyer):\n if Seller.has_sold == False:\n if Buyer.like_buy >= Seller.like_sell:\n Seller.has_sold = True\n Buyer.has_bought = True\n Seller.sold_objects += 1\n Buyer.bought_objects += 1\n print('A trade has been made')\n else:\n Buyer.has_bought = False\n Seller.has_sold = False\n print('There was no deal')\n else:\n Buyer.has_bought = False", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def ingredient_used(self, item, quantity):\n 
logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def check(self, context):\n self.update_product_size()\n return True", "def check(self, context):\n self.update_product_size()\n return True", "def _update_existing_cart_article(self, items, **kwargs):", "def cart_update(self, **kwargs):\n product_tmpl_id = kwargs.get('product_template_id')\n if product_tmpl_id and request.env['product.template'].browse(int(product_tmpl_id)).event_set_ok:\n kwargs.update(express=True)\n return super(WebsiteSaleEventSet, self).cart_update(**kwargs)", "def test_is_active(self):\n\n self.sold.health = 0\n self.assertFalse(self.sold.is_active)", "async def stocks(self, ctx):\n\t\tpass", "def test_add_with_end_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-1\", \n \"3\", \"2020-12-1\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def verify_done():\n if SAVE_EXISTENT == []:\n print \"\\nCan't generate the invoice because You have not bought\"\n press_enter()\n reset()\n show_products()\n sell_products()\n else:\n reset()\n invoice()\n press_enter()\n delete_lists()\n reset()\n main_menu()", "def on_sold(self, full_price: int, my_action: SalesmanAction, seconds_action: SalesmanAction,\n thirds_action: SalesmanAction):", "def stock_processor(id, price, title, remaining, totalPackCount, preorder, start, proxy, headers):\n\n r = request_pack_stock(proxy, headers)\n packs = r['data']['searchPackListings']['data']['searchSummary']['data']['data']\n\n for pack in packs:\n item = [pack['id'], pack['title'], pack['price'], pack['remaining'], pack['totalPackCount'], pack['preorder']]\n #print(f'\\n\\nITEM:{item}\\n\\n')\n if pack['remaining'] == remaining: #change back to !=\n # Checks if it already exists in our instock\n if checker(item):\n pass\n else:\n # Add to instock dict\n INSTOCK.append(item)\n print(f'\\n\\nINSTOCK:{INSTOCK}\\n\\n')\n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n print(item)\n discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n 
else:\n if checker(item):\n INSTOCK.remove(item)", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "def test_update_inventory(self):\n pass", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_add_stock_item(self):\n pass", "def sell():\n return apology(\"TODO\")", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check 
for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')", "def _add_artwork(self, artwork):\n\n insert_artwork = 'INSERT INTO artworks (artwork, price, artist_id, for_sale) VALUES (?, ?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n conn.execute(insert_artwork, (artwork.artwork, artwork.price, artwork.artist_id, artwork.for_sale))\n # new_id = res.lastrowid # Get the ID of the new row in the table\n # artwork.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError as e:\n print(f'\\nError - Artwork with that name is already in the database.\\n', e)\n return False", "async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)" ]
[ "0.76929826", "0.66940457", "0.6220531", "0.604035", "0.6011558", "0.58244693", "0.5766954", "0.56088907", "0.5524131", "0.548621", "0.5472159", "0.5438635", "0.5438635", "0.5418989", "0.539853", "0.5393509", "0.5359396", "0.53583866", "0.53555226", "0.534612", "0.531315", "0.5284787", "0.5282138", "0.5275016", "0.5259241", "0.52583385", "0.5251625", "0.5250533", "0.52435243", "0.5240717" ]
0.7106835
1
prints all artwork for testing purposes
def display_all():
    results = artwork_db.get_all_artwork()
    for artist in results:
        print(artist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self):\n art = \"\\n\".join([\"\".join(row) for row in self.text])\n if self.args.output:\n with open(self.args.output, \"w\") as f:\n f.write(art)\n\n if self.args.verbose:\n print(art)", "def test_artwork(self):\n # Create some art with a known tag\n user = sim.sim_user()\n artwork = sim.sim_artwork(user=user)\n tag = sim.sim_tag()\n artwork.tag_objs.append(tag)\n model.session.flush()\n\n # Ensure it shows in the tag's gallery\n res = self.app.get(self.url('tags.artwork', tag=tag))\n assert artwork.title in res", "def test_visualize_recipe_nutrition(self):\n pass", "def visualize(stuff, **options):\n separate = r\"\\newpage\" #by default, a new tupel is put on a new page\n name = \"some_text_file\" #by default this file is used\n for key in options:\n if key == \"separate\":\n separate = options[key]\n if key == \"name\":\n name = options[key]\n works = True\n totallines = [r\"\\documentclass{article}\", r\"\\usepackage{xcolor}\", r\"\\usepackage{tikz,pgf}\", r\"\\usepackage[left = 0 cm, top = 0cm, bottom = 0cm, right = 2cm]{geometry}\", r\"\\begin{document}\", r\"\\pagestyle{empty}\"]\n for description in stuff:\n data = stuff[description]\n if checkdataformat(description, data):\n if description == \"config\":\n lines = gentikz(data)\n elif description == \"movelist\":\n lines = showmoveslist(data[0], data[1], data[2])\n elif description == \"movelists\":\n lines = compareshowmoveslists(data[0], data[1], data[2])\n elif description == \"list\":\n lines = showlist(data)\n elif description == \"configurations\":\n lines = showconfigurations(data)\n elif description == \"movetable\":\n lines = nktable(data[0], data[1], sort = 'value')\n elif description == \"incrementtable\":\n lines = nktable(data[0], data[1], sort = 'increment')\n elif description == \"totalptable\":\n lines = nktable(data[0], data[1], sort = 'totalpossibilities')\n elif description == \"ptable\":\n lines = nktable(data[0], data[1], sort = 'adjustedpossibilities')\n elif description == \"bfptable\":\n lines = nktable(data[0], data[1], sort = 'bfadjustedpossibilities')\n else:\n print(\"unknown description\")\n lines = []\n for line in lines:\n totallines.append(line)\n totallines.append(separate)\n else:\n print(description, \":\", data, \"don't match, please read help(visualization)\")\n works = False\n totallines.append(r\"\\end{document}\")\n if works:\n compile(totallines, name)", "def starfish(ctx):\n art = art_string()\n print_art = True\n sub = ctx.command.get_command(ctx, ctx.invoked_subcommand)\n if hasattr(sub, \"no_art\"):\n print_art = not sub.no_art\n if print_art:\n print(art)", "def test_get_art_info(self):\n pass", "def test_visualize():\n # Instantiate three particles for testing\n particles = [Particle(0.3, 0.5, 1), \n Particle(0.0, -0.5, -1), \n Particle(-0.1, -0.4, 3)]\n simulator = ParticleSimulator(particles)\n visualize(simulator)", "def write_art_science_pages(outfile: TextIO, do_print: bool, artlist: list, refdict: dict) -> None:\n if do_print:\n start_page_division(outfile, \"index_page\")\n media_path = MEDIA_PATH\n else:\n common_html_header(outfile, \"Fiddler Crab Art - Scientific\")\n media_path = \"\"\n outfile.write(\" <header id=\\\"\" + init_data().art_sci_url + \"\\\">\\n\")\n outfile.write(\" <h1 class=\\\"bookmark1\\\">Scientific Drawings</h1>\\n\")\n outfile.write(\" </header>\\n\")\n outfile.write(\"\\n\")\n artsource = []\n cnt = 0\n for art in artlist:\n if art.art_type == \"science\":\n cnt += 1\n if art.cite_key != \"n/a\":\n ref = refdict[art.cite_key]\n artist 
= ref.citation\n else:\n artist = art.author + \" (\" + art.year + \")\"\n try:\n artsource.index(artist)\n except ValueError:\n artsource.append(artist)\n outfile.write(\" <p>\\n\")\n outfile.write(\" Formal scientific drawings are often works of art as well as scientific illustration. \"\n \"These are ordered chronologically.\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" Total scientific drawing count is \" + str(cnt) + \".\\n\")\n outfile.write(\" </p>\\n\")\n for a in artsource:\n outfile.write(\" <h3 class=\\\"nobookmark\\\">\" + a + \"</h3>\\n\")\n for art in artlist:\n if art.art_type == \"science\":\n if art.cite_key != \"n/a\":\n ref = refdict[art.cite_key]\n artist = ref.citation\n else:\n artist = art.author + \" (\" + art.year + \")\"\n if artist == a:\n outfile.write(\" <figure class=\\\"sppic\\\">\\n\")\n outfile.write(\" <a href=\\\"\" + rel_link_prefix(do_print, \"art/\") + art.image +\n \".html\\\"><img class=\\\"thumbnail\\\" src=\\\"\" + media_path + \"art/\" +\n art.image + \"_tn.\" + art.ext + \"\\\" alt=\\\"\" + art.title + \" thumbnail\\\" title=\\\"\" +\n art.title + \"\\\" /></a>\\n\")\n outfile.write(\" </figure>\\n\")\n if do_print:\n end_page_division(outfile)\n else:\n common_html_footer(outfile)\n for a in artsource:\n for art in artlist:\n if art.art_type == \"science\":\n if art.cite_key != \"n/a\":\n ref = refdict[art.cite_key]\n artist = ref.citation\n else:\n artist = art.author + \" (\" + art.year + \")\"\n if artist == a:\n if do_print:\n write_specific_art_page(outfile, do_print, art, init_data().art_sci_url,\n \"All Scientific Drawings\", refdict)\n else:\n with open(WEBOUT_PATH + \"art/\" + art.image + \".html\", \"w\", encoding=\"utf-8\") as suboutfile:\n write_specific_art_page(suboutfile, do_print, art, init_data().art_sci_url,\n \"All Scientific Drawings\", refdict)", "def main():\n\n viewer = Viewer(1900, 1200)\n viewer.add((init_BaracuddaFish()))\n viewer.add(init_BlueStarFish())\n viewer.add_movable(init_SeaSnake())\n init_groupeOfFishs(viewer)\n\n under_water = [\n 'res/skybox/underwater/uw_lf.jpg',\n 'res/skybox/underwater/uw_rt.jpg',\n 'res/skybox/underwater/uw_up.jpg',\n 'res/skybox/underwater/uw_dn.jpg',\n 'res/skybox/underwater/uw_ft.jpg',\n 'res/skybox/underwater/uw_bk.jpg']\n viewer.add(Skybox(under_water))\n\n viewer.run()", "def print_products(self):\n for f in self.files:\n sys.stdout.write(\n 'Detected File <{0}> {1} (EPSG: {2}){3}'.format(str(f), '1' if self.__check_projection(f) else '0',\n str(self.__raster_epsg(f)), os.linesep))", "def print_products(self):\n for f in self.files:\n sys.stdout.write(\n 'Detected File <{0}> {1} (EPSG: {2}){3}'.format(str(f), '1' if self.__check_projection(f) else '0',\n str(self.__raster_epsg(f)), os.linesep))", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def write_art_crafts_pages(outfile: TextIO, do_print: bool, artlist: list, refdict: dict) -> None:\n if do_print:\n start_page_division(outfile, \"index_page\")\n media_path = MEDIA_PATH\n else:\n common_html_header(outfile, \"Fiddler Crab Crafts\")\n media_path = \"\"\n outfile.write(\" <header id=\\\"\" + init_data().art_craft_url + \"\\\">\\n\")\n outfile.write(\" <h1 class=\\\"bookmark1\\\">Arts &amp; Crafts</h1>\\n\")\n if not do_print:\n outfile.write(\" <nav>\\n\")\n 
outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"#origami\\\">Origami</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </nav>\\n\")\n outfile.write(\" </header>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <h2 id=\\\"origami\\\" class=\\\"nobookmark\\\">Origami</h2>\\n\")\n artsource = []\n cnt = 0\n for art in artlist:\n if art.art_type == \"origami\":\n cnt += 1\n try:\n artsource.index(art.author)\n except ValueError:\n artsource.append(art.author)\n outfile.write(\" <p>\\n\")\n outfile.write(\" Male fiddler crabs are a particular challenge for origami because of the asymmetry, \"\n \"but a number of origami experts have developed fiddler crab models.\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" Total fiddler crab origami models is \" + str(cnt) + \".\\n\")\n outfile.write(\" </p>\\n\")\n for a in artsource:\n outfile.write(\" <h3 class=\\\"nobookmark\\\">\" + a + \"</h3>\\n\")\n for art in artlist:\n if art.art_type == \"origami\":\n if art.author == a:\n outfile.write(\" <figure class=\\\"sppic\\\">\\n\")\n outfile.write(\" <a href=\\\"\" + rel_link_prefix(do_print, \"art/\") + art.image +\n \".html\\\"><img class=\\\"thumbnail\\\" src=\\\"\" + media_path + \"art/\" +\n art.image + \"_tn.\" + art.ext + \"\\\" alt=\\\"\" + art.title + \" thumbnail\\\" title=\\\"\" +\n art.title + \"\\\" /></a>\\n\")\n outfile.write(\" </figure>\\n\")\n if do_print:\n end_page_division(outfile)\n else:\n common_html_footer(outfile)\n for a in artsource:\n for art in artlist:\n if art.art_type == \"origami\":\n if art.author == a:\n if do_print:\n write_specific_art_page(outfile, do_print, art, init_data().art_craft_url, \"All Crafts\",\n refdict)\n else:\n with open(WEBOUT_PATH + \"art/\" + art.image + \".html\", \"w\", encoding=\"utf-8\") as suboutfile:\n write_specific_art_page(suboutfile, do_print, art, init_data().art_craft_url, \"All Crafts\",\n refdict)", "def test_plot_images(self):\n save_file(self.quart.plot_images)", "def main():\n lines, filename = get_filename()\n album_dictionary = extract_all_albums(lines)\n album_dictionary = read_sales(lines, album_dictionary)\n print_table(album_dictionary, filename)", "def test_create_Art_class(self):\n art = Art()\n art.load_image('images/bird.jpg')\n self.assertEqual(len(art.hue_bins), 50)\n self.assertEqual(art.short_name, 'bird')", "def _store_art(self):\n self.correct = self.fig.renderText(\"CORRECT!\")\n self.title = \"\"\"\n ____ _ _ \n | _ \\ _ _| |_| |__ ___ _ __ \n | |_) | | | | __| '_ \\ / _ \\| '_ \\ \n | __/| |_| | |_| | | | (_) | | | |\n |_| \\__, |\\__|_| |_|\\___/|_| |_|\n ____ |___/ _ __ __ _ \n / _ \\ _ _(_)___| \\/ | __ _ ___| |_ ___ _ __ \n | | | | | | | |_ / |\\/| |/ _` / __| __/ _ \\ '__|\n | |_| | |_| | |/ /| | | | (_| \\__ \\ || __/ | \n \\__\\_\\ \\__,_|_/___|_| |_|\\__,_|___/\\__\\___|_| \n\n \"\"\"\n self.incorrect = \"\"\"\n _ _ __\n | \\ | | ___ _ __ ___ _ / /\n | \\| |/ _ \\| '_ \\ / _ \\ (_) / / \n | |\\ | (_) | |_) | __/ _ / / \n |_| \\_|\\___/| .__/ \\___| (_) /_/ \n |_| \n \"\"\"\n self.slash = self.fig.renderText(\"/\")\n\n return", "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"", "def setup():\n setFormat()\n setFilename()\n setScreenMode()", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n 
print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def mine():\n\n fig = new_slide()\n slide_heading(fig, 'Lesser-maintained parts')\n\n theta = np.linspace(0, 2*np.pi)\n x = np.cos(theta - np.pi/2)\n y = np.sin(theta - np.pi/2)\n z = theta\n\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n markerline, stemlines, baseline = ax.stem(\n x, y, z, linefmt='grey', markerfmt='D', bottom=np.pi)\n markerline.set_markerfacecolor('none')\n\n ax = fig.add_subplot(1, 2, 2)\n ax.axis('off')\n ax.imshow(imread('webagg.png'))\n\n yield fig", "def test_visualize_recipe_taste(self):\n pass", "def display(self):\n print (\"+\" + \"-\"*self.size + \"+\")\n for i in range(self.size):\n terrain_strs = [Terrain.display_string(self.array[j, i]) for j in range(self.size)]\n print(\"|\" + \"\".join(terrain_strs) + \"|\")\n print (\"+\" + \"-\"*self.size + \"+\")", "def test_display__method3(self):\n Rectangle.reset_objects()\n s3 = Square(3, 1, 3)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n s3.display()\n self.assertEqual(f.getvalue(), \"\\n\\n\\n ###\\n ###\\n ###\\n\")", "def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)", "def test_simple(self):\n image = self.design.layout.layers[0].images[0]\n assert len(image.traces) == 2", "def show_magicians(magicians):\n for magician in magicians:\n print(magician.title())", "def show_magicians(magicians):\n for magician in magicians:\n print(magician.title())", "def visualise(self):\n\n scores, education = self.get_data()\n self.write_data(scores, education)\n\n return True", "def display_artist_complete_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_all_artwork_from_one_artist(artist_name)\n for piece in results:\n print(piece)\n else:\n print('Sorry, no artwork from this artist to display ')" ]
[ "0.6744282", "0.63890773", "0.61532515", "0.6067221", "0.6010384", "0.59992516", "0.5979724", "0.58644783", "0.5831193", "0.5777325", "0.5777325", "0.5749137", "0.5746105", "0.57324183", "0.57141465", "0.5633593", "0.5600985", "0.55797815", "0.5532287", "0.55314124", "0.5531153", "0.5523166", "0.5498861", "0.54881966", "0.548315", "0.5482991", "0.54675484", "0.54417986", "0.5426039", "0.54242957" ]
0.703469
0
Create an FtpUser instance from a URL. The scheme, username, password, host and port are extracted from the URL; any path data, query string and hash are ignored. A title is, if not specified, constructed from the scheme and host.
def create_from_url(self, url, title=None):
    url = urlparse(url)
    if url.scheme == 'ftp':
        port = 21
    elif url.scheme == 'ftps':
        port = 990
    elif url.scheme == 'sftp':
        port = 22
    elif url.scheme == '':
        raise ValueError('URL has no scheme')
    else:
        raise ValueError("unknown scheme '{}' (known schemes: "
                         'ftp, ftps, sftp)'.format(url.scheme))
    if '@' in url.netloc:
        username, host = url.netloc.split('@', 1)
        if ':' in username:
            username, password = username.split(':', 1)
            username = urlunquote(username)
            password = urlunquote(password)
        else:
            username = urlunquote(username)
            password = None
    else:
        username = password = None
        host = url.netloc
    if username is None or password is None:
        raise ValueError('username and password required')
    if ':' in host:
        host, port = host.split(':', 1)
        port = int(port)
    if title is None:
        title = '{protocol} details for {host}'.format(
            protocol={'ftps': 'FTP'}.get(url.scheme, url.scheme.upper()),
            host=host)
    instance = self.model(title=title, host=host, port=port,
                          username=username)
    instance.encrypt_password(password)
    instance.save()
    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_url(cls, url: URL, *, encoding: str = \"latin1\") -> Optional[\"BasicAuth\"]:\n if not isinstance(url, URL):\n raise TypeError(\"url should be yarl.URL instance\")\n if url.user is None:\n return None\n return cls(url.user, url.password or \"\", encoding=encoding)", "def as_url(self):\n\n if self.host.startswith(('http://', 'https://')):\n # Some persons have put HTTP details in an FtpUser. At least\n # partially any UI's fault, though still their fault...\n return self.host\n\n protocol, port, host = self.protocol, self.port, self.host\n\n if '://' in host:\n host = host.split('://', 1)[1]\n if '@' in host:\n # Probably already has the username and password embedded.\n # Sensible, I'd say, if contrary to the design of this thing.\n return self.host\n if ':' in host:\n host, port = host.split(':', 1)\n else:\n port = None\n else:\n protocol, port, host = self.protocol, self.port, self.host\n\n if (protocol, port) in (('ftp', 21), ('sftp', 22), ('ftps', 990)):\n port = None\n\n username = self.username\n password = self.decrypt_password()\n return '{scheme}://{auth}{host}{port}/'.format(\n scheme=protocol,\n auth='{}:{}@'.format(urlquote(username), urlquote(password))\n if username or password else '',\n host=host,\n port=':{}'.format(port) if port else '')", "def parse_url(url):\n bits = urlparse.urlsplit(url)\n print bits\n transport = bits[0]\n uphp = bits[1].split('@')\n user = ''\n passwd = ''\n if len(uphp) == 2:\n (user, passwd) = uphp.pop(0).split(':')\n\n hp = uphp[0].split(':')\n host = hp[0]\n if len(hp) == 2:\n port = int(hp[1])\n else:\n # Require subclass to default\n port = 0\n dirname, filename = bits[2].rsplit('/', 1)\n # params = map(lambda x: x.split('='), bits[3].split('&'))\n params = [x.split('=') for x in bits[3].split('&')]\n try:\n params = dict(params)\n except ValueError:\n params = {}\n anchor = bits[4]\n return (transport, user, passwd, host, port, dirname, filename, params, anchor)", "def __init__(self, url_str):\n parsed_url = requests.compat.urlparse(utils.to_str(url_str))\n netloc_parts = parsed_url.netloc.split(\"@\")\n if len(netloc_parts) == 1:\n username = password = None\n host_str = netloc_parts[0]\n else:\n username, password = netloc_parts[0].split(\":\")\n host_str = netloc_parts[1]\n\n host_parts = host_str.split(\":\")\n host = host_parts[0]\n\n if len(host_parts) == 1:\n port = 80\n else:\n port = int(host_parts[1])\n\n params = [\n (key, val[0] if val[0] else None)\n for key, val in parse_qs(parsed_url.query, True).items()\n ]\n\n self._info = dict(\n scheme=parsed_url.scheme or \"http\",\n username=username,\n password=password,\n host=host,\n port=port,\n path=parsed_url.path or \"/\",\n params=params,\n fragment=parsed_url.fragment\n )\n self._url = None", "def _parse_url(url):\n parts = urlparse(url)\n scheme = parts.scheme\n port = parts.port or None\n hostname = parts.hostname\n path = parts.path or ''\n virtual_host = path[1:] if path and path[0] == '/' else path\n return (scheme, unquote(hostname or '') or None, port,\n unquote(parts.username or '') or None,\n unquote(parts.password or '') or None,\n unquote(path or '') or None,\n unquote(virtual_host or '') or None,\n unquote(parts.query or '') or None,\n dict(dict(parse_qsl(parts.query))))", "def parse_service_url(url: str) -> Tuple[str, str, str]:\n pieces = urlparse(url)\n user = pieces.username\n password = pieces.password\n netloc = pieces.hostname\n if pieces.port is not None:\n netloc += f\":{pieces.port}\"\n url = urlunparse((\n pieces.scheme, netloc, pieces.path, 
None, None, None))\n return url, user, password", "def from_url(cls, url, **kwargs):\n kwargs = dict(kwargs)\n kwargs['format'] = 'url'\n # for now we only support ascii\n return cls(value=url.encode('ascii'), **kwargs)", "def parse_url(url):\n scheme, host, port, user, passwd, path, vhost, qs, qs_dict = _parse_url(url)\n return dict(scheme=scheme, hostname=host, port=port, username=user,\n password=passwd, path=path, virtual_host=vhost,\n query=qs, **qs_dict)", "def get_ftp_user_info(self):\n info_dic = {'username': None,\n 'password': None}\n if 'ftp' in self.protocol:\n start = self.__url.find('//') + 2 # Get the char after the '//'\n end = self.__url.find('@')\n if (start >= 0) and (end >= 0) and (end > start):\n info = self.__url[start:end]\n if info[0] is not ':':\n info_pair = info.split(':')\n if len(info_pair) > 1:\n info_dic['username'] = info_pair[0]\n info_dic['password'] = info_pair[1]\n return info_dic\n else:\n return None", "def _parse_url(url):\r\n if \":\" not in url:\r\n raise ValueError(\"url is invalid\")\r\n\r\n scheme, url = url.split(\":\", 1)\r\n\r\n parsed = urlparse(url, scheme=\"http\")\r\n if parsed.hostname:\r\n hostname = parsed.hostname\r\n else:\r\n raise ValueError(\"hostname is invalid\")\r\n port = 0\r\n if parsed.port:\r\n port = parsed.port\r\n\r\n is_secure = False\r\n if scheme == \"ws\":\r\n if not port:\r\n port = 80\r\n elif scheme == \"wss\":\r\n is_secure = True\r\n if not port:\r\n port = 443\r\n else:\r\n raise ValueError(\"scheme %s is invalid\" % scheme)\r\n\r\n if parsed.path:\r\n resource = parsed.path\r\n else:\r\n resource = \"/\"\r\n\r\n if parsed.query:\r\n resource += \"?\" + parsed.query\r\n\r\n return (hostname, port, resource, is_secure)", "def parse_url(url, port = 80):\n scheme = url[0:url.find(\"://\")]\n if scheme not in (\\\n 'file', 'ftp', 'gopher', 'hd1', 'http', 'https', \\\n 'imap', 'mailto', 'mms', \\\n 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', \\\n 'sftp', 'shttp', \\\n 'sip', 'sips', 'snews', 'svn', 'svn+ssh', \\\n 'telnet', 'wais'):\n no_scheme = True\n url = url.replace(scheme, 'http', 1)\n else:\n no_scheme = False\n u = urlparse.urlparse(url)\n hasuser = u.netloc.find('@')\n d = {\n 'scheme' : (scheme if no_scheme else u.scheme),\n 'path' : u.path,\n 'query' : u.query,\n 'fragment' : u.fragment,\n 'user' : (u.username if u.username != None else ''),\n 'pass' : (u.password if u.password != None else ''),\n 'port' : (u.port if u.port != None else port),\n 'host' : u.netloc[((hasuser + 1) if (hasuser >= 0) else 0):]\n }\n return d", "def __init_url(self, url):\n scheme, netloc, path, query, fragment = urlparse.urlsplit(url)\n if scheme:\n self.__dict__['__scheme'] = str(scheme)\n self.__dict__['__url'] = urlparse.urlunsplit((scheme, netloc.lower(), path, query, fragment))\n else:\n self.__init_url(str(\"http://\" + url))", "def __init__(self, url):\n self._headers = {}\n \n parsed_url = urlparse.urlparse(url)\n if parsed_url.scheme and not parsed_url.netloc:\n # If we have a scheme but no netloc, someone's entered\n # a URL like 'foo.com:123'. Add an http://to the\n # start, and reparse.\n url = 'http://' + url\n parsed_url = urlparse.urlparse(url)\n \n if not parsed_url.scheme:\n # If no scheme was provided, then the url parsing\n # won't have worked. 
Reparse.\n scheme = 'http'\n url = '%s://%s' % (scheme, url)\n parsed_url = urlparse.urlparse(url)\n else:\n scheme = parsed_url.scheme\n\n if parsed_url.netloc.find(':') < 0:\n if scheme == 'http':\n netloc = parsed_url.netloc + ':80'\n else:\n netloc = parsed_url.netloc + ':443'\n else:\n # Already had an explicit port\n netloc = parsed_url.netloc\n \n # Normalise\n self.url = urlparse.urlunparse((scheme, netloc, parsed_url.path,\n parsed_url.params, parsed_url.query, parsed_url.fragment))\n self.parsed_url = urlparse.urlparse(self.url)", "def init_from_url(cls, url):\n init_kwargs = cls._validate_init_kwargs(url)\n return cls(**init_kwargs)", "def __init__(self, url: str):\n self.original_url = url\n self._parsed_url = urlparse(url)\n self.path = self._parsed_url.path.rstrip(\"/\")\n self.dirname, self.basename = os.path.split(self.path)\n self.basename_without_extension, self.extension = os.path.splitext(\n self.basename\n )\n self.hostname = self._parsed_url.hostname\n self.netloc = self._parsed_url.netloc\n self.scheme = self._parsed_url.scheme", "def _parse(self, string):\n modern_scheme = r\"\"\"\nssh://\n(?:\n (?P<user>[^@]+)\n@)? # user is anything but @, then the @ separator\n(?P<host>[^:/]+) # host is anything but : and /\n(:(?P<port>\\d+))? # optional port\n(/(?P<remote_dir>.*))? # optional remote directory\n\"\"\"\n match = re.match(modern_scheme, string, re.VERBOSE)\n if match:\n self._handle_match(match)\n else:\n old_scheme = \"\"\"\n(?P<user>[^@]+) # user is anything but @, and optional\n@ # mandatory @ separator\n(?P<host>[^:/]+) # host is anything but : and /\n(\n (:|/)? # directory separator is either : or /\n (?P<remote_dir>.*))? # remote directory is optional\n \"\"\"\n match = re.match(old_scheme, string, re.VERBOSE)\n if match:\n self._handle_match(match)\n else:\n raise URLParseError(\"\"\" \\\nCould not parse %s as a valid url.\nSupported schemes are\n\n user@host:directory\n\n ssh://user@host:port/directory\n\"\"\" % self.as_string)", "def rebuild_url(scheme, path, fragment, username,\n password, hostname, port, query):\n netloc = \"@\".join(filter(None, [\n \":\".join(\n filter(None, [\n username,\n password,\n ])\n ),\n \":\".join(\n filter(None, [\n hostname,\n str(port or ''),\n ])\n )\n ]))\n\n return urllib.parse.urlunsplit([\n scheme,\n netloc,\n path,\n query,\n fragment,\n ])", "def __init__(self, scheme, host, port, path, query=None):\n self._hash = None\n self._str = None\n self._scheme = self._makeEmptyNone(scheme)\n self._host = host\n self._port = port\n self._path = self._makeEmptyNone(path)\n self._query = self._makeEmptyNone(query)\n self._isRegularURI = True", "def __init__(self, url, username, password, **kwargs):\n self.url = url\n self.username = username\n self.password = password\n self.context = kwargs", "def __init__(self, url, proxy=None, username=None, password=None,\n auth=None, ssl_verify_cert=True):\n\n log.debug(\"url: \" + str(url))\n self.url = URL.objectify(url)\n\n # Prepare proxy info\n if proxy is not None:\n self.proxy = proxy\n # requests library expects the proxy url to have a scheme\n if re.match('^.*://', proxy) is None:\n self.proxy = self.url.scheme + '://' + proxy\n\n # add a port is one is not specified\n # TODO: this will break if using basic auth and embedding\n # username:password in the proxy URL\n p = self.proxy.split(\":\")\n if len(p) == 2:\n self.proxy += ':8080'\n log.debug(\"init - proxy: %s\" % (self.proxy))\n\n # Build global headers\n self.headers = {\"User-Agent\": \"Mozilla/5.0\",\n \"Content-Type\": 
\"text/xml\",\n \"Accept\": \"text/xml\"}\n if self.url.username is not None:\n username = unquote(self.url.username)\n password = unquote(self.url.password)\n\n self.username = username\n self.password = password\n self.auth = auth\n # TODO: it's possible to force through a specific auth method here,\n # but no test code for this.\n self.ssl_verify_cert = ssl_verify_cert\n self.url = self.url.unauth()\n log.debug(\"self.url: \" + str(url))", "def parse_url(url):\n url = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(url.query)\n query_ = query.get('dn', query.get('title', ''))[0]\n if url.scheme == \"magnet\":\n return \"magnet:?xt={}\".format(query['xt'][0]), query_\n return \"http://{}{}{}\".format(*url[0:3]), query_", "def parse_url(url):\n (scheme, netloc, path, params, query, frag) = urlparse(url)\n\n # We only support web services\n if not scheme in ('http', 'https'):\n raise InvalidUrl('Scheme must be one of http or https')\n\n is_ssl = scheme == 'https' and True or False\n\n # Verify hostnames are valid and parse a port spec (if any)\n match = re.match('([a-zA-Z0-9\\-\\.]+):?([0-9]{2,5})?', netloc)\n\n if match:\n (host, port) = match.groups()\n if not port:\n port = is_ssl and '443' or '80'\n else:\n raise InvalidUrl('Invalid host and/or port: %s' % netloc)\n\n return (host, int(port), path.strip('/'), is_ssl)", "def __init__(self, url):\n self._url = urlparse(url)\n #self.validate()", "def __init__(self, repo_url, creds, branch, repo_path=None, validate=True):\n parsed = urlparse(repo_url)\n self.scheme = parsed.scheme\n self.hostname = parsed.hostname\n self.org, self.repo = parsed.path.strip('/').split('/')\n self.creds = creds\n self.branch = branch\n self.repo_path = repo_path\n self.git_repo = None\n self.validate = validate", "def __init__(self, hostname, username = None, password = None):\n self.hostname = hostname\n # See https://github.com/OpenEVSE/ESP8266_WiFi_v2.x/blob/master/src/html/openevse.js#L70\n # For OpenEVSE's Web UIs version of the regex\n self.regex = re.compile(\"\\\\$([^\\\\^]*)(\\\\^..)?\")\n if username and password:\n userpass = '%s:%s' % (username, password)\n self.authstring = base64.encodebytes(userpass.encode()).decode().rstrip()\n else:\n self.authstring = None", "def from_text(cls, text, lazy=False):\n _url = URL.from_text(text)\n return cls(_url, lazy=lazy)", "def parse_host(host):\n if not host:\n return None, u''\n if u':' in host:\n try:\n inet_pton(socket.AF_INET6, host)\n except socket.error as se:\n raise URLParseError('invalid IPv6 host: %r (%r)' % (host, se))\n except UnicodeEncodeError:\n pass # TODO: this can't be a real host right?\n else:\n family = socket.AF_INET6\n return family, host\n try:\n inet_pton(socket.AF_INET, host)\n except (socket.error, UnicodeEncodeError):\n family = None # not an IP\n else:\n family = socket.AF_INET\n return family, host", "def compose_url(scheme, username, password, hostname, port, path, key):\n if not hostname and not path:\n raise click.ClickException(\"Provide at least one of `hostname`, `path`\")\n\n query: typing.Optional[dict] = None\n if key:\n query = {k: v for k, v in key}\n\n composed_url = urls.URL.from_components(\n scheme=scheme,\n username=username,\n password=password,\n hostname=hostname,\n port=port,\n path=path,\n query=query,\n )\n\n # Output composed url (with no hidden secrets)\n click.echo(composed_url.url, nl=False)", "def from_url(cls, url: str, filter: int = None, image_kwargs: dict | None = None):\n filename = get_url(url, progress_bar=True)\n return 
cls(filename, filter=filter, image_kwargs=image_kwargs)", "def __init__(self, url):\n self.url = url\n self.parsed = requests.utils.urlparse(url)\n\n self.clean_netloc()\n\n if not self.parsed.netloc:\n raise ValueError('Wrong URL (Make sure \"http(s)://\" included)')\n\n self.adjust_url()" ]
[ "0.62467754", "0.6042227", "0.601402", "0.583077", "0.5744555", "0.5706708", "0.56767875", "0.551295", "0.54875374", "0.5467554", "0.54085314", "0.5384588", "0.5314198", "0.52867806", "0.52697563", "0.52619296", "0.52078646", "0.51161796", "0.51135653", "0.5037481", "0.49804032", "0.49750224", "0.49713925", "0.489337", "0.48930588", "0.4805268", "0.4795942", "0.4785173", "0.47773352", "0.4765775" ]
0.78416204
0
Should return 'ftp', 'ftps' or 'sftp', but can return other values if the host is instead a URL with a different scheme.
def protocol(self):
    if '://' in self.host:
        scheme, host = self.host.split('://', 1)
        return scheme
    elif self.port == 21:
        return 'ftp'
    elif self.port == 22:
        return 'sftp'
    elif self.port == 990:
        return 'ftps'
    else:
        # Uncertain, assume FTP.
        return 'ftp'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _match(cls, url, **kwargs):\n return url.scheme.startswith('ftp')", "def getSchemes(clazz):\n return [\"sftp\"]", "def default_protocol(self):\n return \"sftp://\"", "def test_scheme(self):\n self.assertEqual(self.ftp_case.scheme, \"ftp\")\n self.assertEqual(self.ldap_case.scheme, \"ldap\")\n self.assertEqual(self.news_case.scheme, \"news\")\n self.assertEqual(self.telnet_case.scheme, \"telnet\")\n self.assertEqual(self.urn_case.scheme, \"urn\")", "def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol", "def available_protocols(self):\n return [\"ssh://\", \"sftp://\"]", "def _parse_host(host: str) -> str:\n urlparse_host = urlsplit(host).hostname\n if urlparse_host:\n # In this case, host = https://xx.cloud.databricks.com\n return urlparse_host\n else:\n # In this case, host = xx.cloud.databricks.com\n return host", "def host_to_site(host):\n\n if host:\n # www.facebook.com m.facebook.com l.facebook.com lm.facebook.com\n if host.endswith('facebook.com'):\n return 'Facebook'\n # youtu.be www.youtube.com youtube.com m.youtube.com\n elif host.endswith('youtube.com') or host == 'youtu.be':\n return 'YouTube'\n # old.reddit.com www.reddit.com\n elif host.endswith('reddit.com'):\n return 'Reddit'\n # t.co twitter.com\n elif host.endswith('twitter.com') or host == 't.co':\n return 'Twitter'\n elif host.endswith('tiktok.com'):\n return 'TikTok'\n return None", "def split_type_host(url):\n type, rest = urllib.splittype(url)\n host, selector = urllib.splithost(rest)\n return type, host, selector", "def scheme(self):\n return self.use_ssl and \"https\" or \"http\"", "def parse_host(host):\n if not host:\n return None, u''\n if u':' in host:\n try:\n inet_pton(socket.AF_INET6, host)\n except socket.error as se:\n raise URLParseError('invalid IPv6 host: %r (%r)' % (host, se))\n except UnicodeEncodeError:\n pass # TODO: this can't be a real host right?\n else:\n family = socket.AF_INET6\n return family, host\n try:\n inet_pton(socket.AF_INET, host)\n except (socket.error, UnicodeEncodeError):\n family = None # not an IP\n else:\n family = socket.AF_INET\n return family, host", "def _urlparse_splitscheme(url):\r\n # The scheme is valid only if it contains these characters.\r\n scheme_chars = \\\r\n \"abcdefghijklmnopqrstuvwxyz0123456789+-.\"\r\n\r\n scheme = \"\"\r\n rest = url\r\n\r\n spart = url.split(\":\", 1)\r\n if len(spart) == 2:\r\n\r\n # Normalize the scheme.\r\n spart[0] = spart[0].lower()\r\n\r\n # A scheme is valid only if it starts with an alpha character.\r\n if spart[0] and spart[0][0].isalpha():\r\n for char in spart[0]:\r\n if char not in scheme_chars:\r\n break\r\n (scheme, rest) = spart\r\n\r\n return scheme, rest", "def _get_host(self, scheme='', hostname_only=False):\n host = self.host or ''\n # urlparse requires '//' to be provided if scheme is not specified\n original_parsed = urlparse.urlsplit(host)\n if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:\n host = '%s://%s' % (scheme, host) if scheme else '//%s' % host\n parsed = urlparse.urlsplit(host)\n\n if hostname_only:\n return parsed.hostname\n\n try:\n port = parsed.port or self.port\n except ValueError:\n port = self.port\n netloc = parsed.netloc if port is None else '%s:%s' % (parsed.hostname, port)\n\n url_components = list(parsed)\n url_components[1] = netloc\n ret = urlparse.urlunsplit(url_components)\n return ret.lstrip('/')", "def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"", "def 
_valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False", "def as_url(self):\n\n if self.host.startswith(('http://', 'https://')):\n # Some persons have put HTTP details in an FtpUser. At least\n # partially any UI's fault, though still their fault...\n return self.host\n\n protocol, port, host = self.protocol, self.port, self.host\n\n if '://' in host:\n host = host.split('://', 1)[1]\n if '@' in host:\n # Probably already has the username and password embedded.\n # Sensible, I'd say, if contrary to the design of this thing.\n return self.host\n if ':' in host:\n host, port = host.split(':', 1)\n else:\n port = None\n else:\n protocol, port, host = self.protocol, self.port, self.host\n\n if (protocol, port) in (('ftp', 21), ('sftp', 22), ('ftps', 990)):\n port = None\n\n username = self.username\n password = self.decrypt_password()\n return '{scheme}://{auth}{host}{port}/'.format(\n scheme=protocol,\n auth='{}:{}@'.format(urlquote(username), urlquote(password))\n if username or password else '',\n host=host,\n port=':{}'.format(port) if port else '')", "def protocol(self, code):\n return self.url.scheme", "def _hostname(self, code, protocol=None):\n if protocol is None:\n protocol = self.protocol(code)\n if protocol == 'https':\n host = self.ssl_hostname(code)\n else:\n host = self.hostname(code)\n return protocol, host", "def test_parse_url_lowercase_host() -> None:\n assert indieauth._parse_url(\"http://ex.com/hello\").path == \"/hello\"\n assert indieauth._parse_url(\"http://EX.COM/hello\").hostname == \"ex.com\"\n\n parts = indieauth._parse_url(\"http://EX.COM:123/HELLO\")\n assert parts.netloc == \"ex.com:123\"\n assert parts.path == \"/HELLO\"", "def get_http_protocol(self):\n if self.cfg.ssl:\n return \"https\"\n else:\n return \"http\"", "def scheme(self) -> Optional[pulumi.Input[Union[str, 'HTTPSchemeType']]]:\n return pulumi.get(self, \"scheme\")", "def protocol(self):\n return 'https' if self.allow_https and self.is_secure else 'http'", "def has_compatible_scheme(url):\n return url.startswith(('http://', 'https://'))", "def supports_uri_scheme():\n\n pass", "def get_ftp_code(self, host):\n return self.weak_hosts.get(host)[0]", "def ensure_scheme(url: str, default: str = 'http') -> str:\n\n if not url:\n return url\n\n # purl (or to be precise urlparse) will parse empty host names ('abc.xyz')\n # wrongly, assuming the abc.xyz is a path. by adding a double slash if\n # there isn't one already, we can circumvent that problem\n if '//' not in url:\n url = '//' + url\n\n _url = URL(url)\n\n if _url.scheme():\n return url\n\n return _url.scheme(default).as_string()", "def extract_scheme(url):\n return urlsplit(url, \"http\").scheme", "def testNoScheme(self):\n if self.needScheme: return\n \n self.assertEqual([\"123.123.123.123\"], grab('123.123.123.123', self.needScheme))", "def getTransportURL( self, path, protocols = False ):\n res = self.exists( path )\n if res['OK']:\n for url in res['Value']['Successful']:\n if protocols and not self.protocol in protocols:\n res['Value']['Successful'].pop( url )\n res['Value']['Failed'][url] = 'Protocol not supported'\n continue\n if url[0] == '/':\n nameDict = self.getParameters()['Value']\n nameDict['FileName'] = url\n ret = pfnunparse( nameDict )\n if ret['OK']:\n res['Value']['Successful'][url] = ret['Value']\n else:\n res['Value']['Successful'].pop( url )\n res['Value']['Failed'][url] = ret['Message']\n else:\n res['Value']['Successful'][url] = url\n return res", "def getHost():" ]
[ "0.6515323", "0.6393162", "0.63670725", "0.6297537", "0.6294226", "0.6239581", "0.6223189", "0.6099167", "0.60723436", "0.59676903", "0.59063804", "0.5817976", "0.58139056", "0.5719531", "0.570742", "0.5649601", "0.5646128", "0.562775", "0.561612", "0.5568476", "0.5562886", "0.5560348", "0.5558261", "0.55549866", "0.5537288", "0.5528224", "0.5488166", "0.5475807", "0.54729277", "0.5455012" ]
0.7592643
0
``self`` as a URL, including the username and password where possible.
def as_url(self):
    if self.host.startswith(('http://', 'https://')):
        # Some persons have put HTTP details in an FtpUser. At least
        # partially any UI's fault, though still their fault...
        return self.host

    protocol, port, host = self.protocol, self.port, self.host

    if '://' in host:
        host = host.split('://', 1)[1]
        if '@' in host:
            # Probably already has the username and password embedded.
            # Sensible, I'd say, if contrary to the design of this thing.
            return self.host
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            port = None
    else:
        protocol, port, host = self.protocol, self.port, self.host

    if (protocol, port) in (('ftp', 21), ('sftp', 22), ('ftps', 990)):
        port = None

    username = self.username
    password = self.decrypt_password()
    return '{scheme}://{auth}{host}{port}/'.format(
        scheme=protocol,
        auth='{}:{}@'.format(urlquote(username), urlquote(password))
        if username or password else '',
        host=host,
        port=':{}'.format(port) if port else '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, url, username, password, **kwargs):\n self.url = url\n self.username = username\n self.password = password\n self.context = kwargs", "def url(self, privacy=False, *args, **kwargs):\n\n # Our URL parameters\n params = self.url_parameters(privacy=privacy, *args, **kwargs)\n\n default_port = 443 if self.secure else 80\n\n # Determine Authentication\n auth = ''\n if self.user and self.password:\n auth = '{user}:{password}@'.format(\n user=NotifyPushjet.quote(self.user, safe=''),\n password=self.pprint(\n self.password, privacy, mode=PrivacyMode.Secret, safe=''),\n )\n\n return '{schema}://{auth}{hostname}{port}/{secret}/?{params}'.format(\n schema=self.secure_protocol if self.secure else self.protocol,\n auth=auth,\n # never encode hostname since we're expecting it to be a valid one\n hostname=self.host,\n port='' if self.port is None or self.port == default_port\n else ':{}'.format(self.port),\n secret=self.pprint(\n self.secret_key, privacy, mode=PrivacyMode.Secret, safe=''),\n params=NotifyPushjet.urlencode(params),\n )", "def __str__(self):\n if self.password is None:\n return self._url\n else:\n return self._url.replace(self.password+'@', '******@')", "def __command_url(self):\n return \"http://\" + self._host + \\\n \"/cgi-bin/hi3510/{}&-usr=\" + \\\n self._username + \"&-pwd=\" + self._password", "def link(self):\n return 'http://{}:{}'.format(self.basic_url, self.port)", "def url(self):\n return 'http://%s:%d' % (self._host, self._port)", "def create_url(self):\n\n # Format the template strings with the user credentials and host\n # information provided upon instantiation.\n url = self.sql_url_template\n url = url.format(\n username=self.sql_username,\n password=self.sql_password,\n host=self.sql_host,\n port=self.sql_port,\n db=self.sql_db\n )\n\n return url", "def __baseurl(self):\n return \"http://\" + self._host + \\\n \"/cgi-bin/hi3510/param.cgi?cmd={}&-usr=\" + \\\n self._username + \"&-pwd=\" + self._password", "def url(self):\n ...", "def auth_url(self):\n\n return \"{}?client_id={}&redirect_uri={}&scope={}&state={}\".format(AUTH_ENDPOINT, self.client_id,\\\n self.redirect_uri, self.scope, self.state)", "def Url(self) -> str:", "def authorization_url(self): # pragma: no cover\n raise NotImplementedError()", "def auth_url(self):\n return_url = self.redirect_uri\n nonce = self.strategy.random_string(64)\n self.add_nonce(nonce)\n\n payload = urlencode({\"nonce\": nonce, \"return_sso_url\": return_url})\n base_64_payload = urlsafe_b64encode(payload.encode(\"utf8\")).decode(\"ascii\")\n\n payload_signature = hmac.new(\n self.setting(\"SECRET\").encode(\"utf8\"),\n base_64_payload.encode(\"utf8\"),\n sha256,\n ).hexdigest()\n encoded_params = urlencode({\"sso\": base_64_payload, \"sig\": payload_signature})\n return f\"{self.get_idp_url()}?{encoded_params}\"", "def url (self):\n return Links.createURL('/')", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def set_credentials(self, username, password, url):\n # remove trailing slash off URL\n url = url.rstrip('/')\n # save variables to object\n self.url = url\n self.username = username\n self.password = password\n self.xml_rpc = '%s/server/xml.server.php' % (self.url)", "def TOURL(self) -> str:\n\t\treturn \"%s://%s:%d/\" % (\"https\" if self.useSSL else \"https\", self.toHost, self.toPort)", "def url(self):\n return self.full()", "def url(self):\n url = self.url\n return url", "def _make_url(self):\n ...", "def base_url(self):\n return 'http://%s/api.php?token=%s&path_info=' % \\\n 
(self.ac_url, self.api_key)", "def full_url(self):\n return \"%s://%s%s\" % (self.protocol, self.host, self.uri)", "def __init__(self, username, password, referrer, expiration=60,\n root_uri=_DEFAULT_ROOT_URI):\n self.username = username\n self.password = password\n self.expiration = expiration\n self.referrer = referrer\n self.root_uri = root_uri\n self._token = None\n self._request_token()", "def makeAccessURL(cls, baseURL):\n\t\treturn \"%s/%s\"%(baseURL, cls.name)", "def get_login_url(self, token=None, extra=None):\n if self.token is None and token is None:\n raise PhotobucketAPIError(\"token needs to be set on instance or provided.\")\n params = {}\n if extra:\n params['extra'] = extra\n params.update(dict(oauth_token=token or self.token))\n return \"%s?%s\" % (self.LOGIN, urllib.urlencode(params))", "def get_url(self, *args, **kwargs):\n raise NotImplementedError", "def user_info_url(self):\n return self._user_info_url", "def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"", "def __init__(self,username, password):\n self.username = username\n self.password = password", "def __init__(self, base_url, login=None, password=None):\n\n self._base_url = base_url.rstrip('/')\n self._login = login\n self._password = password" ]
[ "0.682425", "0.67110646", "0.66964537", "0.6692836", "0.64586484", "0.634907", "0.6341694", "0.6297712", "0.6285951", "0.62704325", "0.6253215", "0.6201056", "0.61710817", "0.61552453", "0.6132472", "0.6080954", "0.60625875", "0.60589164", "0.60349417", "0.60145766", "0.59989095", "0.5953575", "0.59341675", "0.59212977", "0.5912282", "0.59079915", "0.5900685", "0.5886523", "0.5876042", "0.5869592" ]
0.68590957
0
Build task A branch.
def build_task_a(self, x, y, is_training, ext_wts=None): config = self.config global_step = self.global_step if config.backbone_class == 'resnet_backbone': bb_config = config.resnet_config else: assert False, 'Not supported' proto_config = config.protonet_config opt_config = config.optimizer_config num_classes_a = self._num_classes_a # Classification branch for task A. h_a = self._run_backbone(x, is_training=is_training, ext_wts=ext_wts) self._h_a = h_a h_shape = h_a.get_shape() h_size = 1 for ss in h_shape[1:]: h_size *= int(ss) self._h_size = h_size if ext_wts is not None: w_class_a = weight_variable( [h_size, num_classes_a], init_method='numpy', dtype=self.dtype, init_param={'val': np.transpose(ext_wts['w_class_a'])}, wd=bb_config.wd, name='w_class_a') b_class_a = weight_variable([], init_method='numpy', dtype=self.dtype, init_param={'val': ext_wts['b_class_a']}, wd=0e0, name='b_class_a') else: w_class_a = weight_variable([h_size, num_classes_a], init_method='truncated_normal', dtype=self.dtype, init_param={'stddev': 0.01}, wd=bb_config.wd, name='w_class_a') b_class_a = weight_variable([num_classes_a], dtype=self.dtype, init_method='constant', init_param={'val': 0.0}, name='b_class_a') self._w_class_a = w_class_a self._b_class_a = b_class_a num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64) num_classes_a_dyn32 = tf.shape(b_class_a)[0] if proto_config.cosine_a: if proto_config.cosine_tau: if ext_wts is None: tau_init_val = 10.0 else: tau_init_val = ext_wts['tau'][0] tau = weight_variable([], dtype=self.dtype, init_method='constant', init_param={'val': tau_init_val}, name='tau') else: tau = tf.constant(1.0) w_class_a_norm = self._normalize(w_class_a, axis=0) h_a_norm = self._normalize(h_a, axis=1) dot = tf.matmul(h_a_norm, w_class_a_norm) if ext_wts is not None: dot += b_class_a logits_a = tau * dot else: logits_a = tf.matmul(h_a, w_class_a) + b_class_a self._prediction_a = logits_a self._prediction_a_all = self._prediction_a y_dense = tf.one_hot(y, num_classes_a) xent_a = tf.nn.softmax_cross_entropy_with_logits( logits=logits_a, labels=y_dense) xent_a = tf.reduce_mean(xent_a, name='xent') cost_a = xent_a self._cost_a = cost_a cost_a += self._decay() self._prediction_a = logits_a return logits_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def trigger_build(self, *, branch=None, message=None):", "def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)", "def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))", "def execute_build(\n self,\n tasks: List[ReleaseTask],\n bld_args: RepoBuildArgs,\n ) -> None:", "def __add_simple_branch(self, action, task_activities):\n action_activity_name = create_activity_name_from_action(action=action)\n add_activity_to_task(task_activities=task_activities, activity_name=action_activity_name, hide_activity=True)\n\n post_or_element, stop_pre_or_tag = self.__add_pre_or_and_post_or(action=action,\n action_activity_name=action_activity_name,\n task_activities=task_activities)\n\n for branch in action.findall(\"./branches_Branch\"):\n branches_steps = branch.find(\"./branchBehaviour_BranchTransition\").findall(\".//steps_Behaviour\")\n branch_start_actions, final_successor_action = self.__get_final_successor_and_start(actions=branches_steps)\n # Create ActionFactory and add action\n self.__create_actions_for_all_branches(branches_steps, task_activities)\n\n branch_conditions = branch.findall(\".//branchCondition_GuardedBranchTransition\")\n for condition in branch_conditions:\n condition_types = self.__get_condition_types(condition)\n reference_name = self.__get_reference_name(condition_types=condition_types)\n variable_usage = get_element_by_identifier(attribute=\"referenceName\", search_string=reference_name,\n element_tree=self.xml_cache.get_xml_tree(\"usagemodel\"))\n\n for branch_start_action in branch_start_actions:\n parent = variable_usage.getparent()\n bool_exp = parent.find(\".//specification_VariableCharacterisation\").get(\"specification\")\n match_object = re.findall(r'true;+\\d\\.\\d*|false;+\\d\\.\\d*', bool_exp)\n # Get branch probability for post element\n branch_probability = \"0\"\n # First start action has false probability\n if \"NOT\" in condition_types:\n for matching_object in match_object:\n if \"false\" in matching_object:\n branch_probability = matching_object.split(\";\")[1]\n else:\n for matching_object in match_object:\n if \"true\" in matching_object:\n branch_probability = matching_object.split(\";\")[1]\n\n post_predecessor_activity_name = create_activity_name_from_action(action=branch_start_action)\n post_predecessor_activity = SubElement(post_or_element, 'activity')\n post_predecessor_activity.set(\"name\", post_predecessor_activity_name)\n post_predecessor_activity.set(\"prob\", branch_probability)\n\n self.__add_stop_action_precedences(final_successor_action, stop_pre_or_tag)", "def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")", "def build(ctx: typer.Context):\n from .tasks import build, main\n\n sys.argv = sys.argv[:1] + (ctx.args or [\"list\"])\n main(vars(build))", "def create_task():", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user 
%s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def gen_task0():\n argc = 1\n goal = 'f'\n premise = 'b'\n ctx, targets = list(), list()\n # Generate according to goal <- premise\n args = r_consts(argc)\n # Add the successful ground case\n ctx.append([(premise, args)])\n targets.append(((goal, args), 1))\n # Fail on non-matching constant\n args = args.copy()\n args[R.randrange(len(args))] = r_consts(1, args)[0]\n preds = r_preds(3)\n ctx.append([(preds[0], args)])\n targets.append(((goal, args), 0))\n # Add padding length dummy rule\n vs = r_vars(argc)\n ctx.append([(preds[1], vs), (preds[2], vs)])\n preds.extend([goal, premise])\n gen_task(ctx, targets, preds)", "def __branch_factory(self, action, task_activities):\n branches = action.findall(\"./branches_Branch\")\n for branch in branches:\n branch_type = get_branch_type(branch=branch)\n if \"probabilistic\" == branch_type:\n return self.__add_probabilistic_branch(action=action, task_activities=self.task_activities)\n elif \"type\" == branch_type:\n return self.__add_type_branch(action=action, task_activities=task_activities)\n elif \"detailed\" == branch_type:\n return self.__add_detailed_branch(action=action, task_activities=task_activities)\n elif \"simple\" == branch_type:\n return self.__add_simple_branch(action=self.action, task_activities=self.task_activities)\n else:\n raise ValueError(\"Unknown branch_type. Abort Mission.\")", "def do_one_task(entry, channel):\n if channel not in [\"mu\", \"ele\"]:\n raise RuntimeError(\"channel arg must be mu or ele\")\n\n output_file = os.path.join(OUTPUT_DIR, entry.outname + \"_%s.root\" % channel)\n\n if os.path.isfile(output_file):\n print \"! 
Output file already exists - skipping this task\"\n return\n\n crab_dir = \"crab_%s_%s_my_feature\" % (entry.taskname, channel)\n\n status_dict = get_job_status(crab_dir)\n print status_dict\n # if not status_dict['finished']:\n # print \"crab jobs not finished - skipping\"\n # return\n\n sample_dir = entry.dataset.split(\"/\")[1]\n date_str = status_dict['task_name'].split(\":\")[0]\n input_str = os.path.join(NAF_DIR, sample_dir, crab_dir, date_str, \"0000\", \"tree_%s_*.root\" % channel)\n\n # actually do the hadding\n if RUN_ON_BIRD:\n qsub_command = \"\"\"qsub -N %s -v OUTPUTF=\"%s\",INPUTF=\"%s\" qsub_hadd.sh\"\"\" % (entry.taskname, output_file, input_str)\n # print qsub_command # Uncomment this line when testing to view the qsub command\n subprocess.check_call(qsub_command, shell=True)\n else:\n hadd_cmd = \"hadd %s %s\" % (output_file, input_str)\n print hadd_cmd\n subprocess.check_output(hadd_cmd, shell=True) # need shell=True for wildcard expansion?", "def branch(branch_name):\n env.branch = branch_name", "def branch(branch_name):\n env.branch = branch_name", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def __add_detailed_branch(self, action, task_activities):\n add_activity_to_task(task_activities=task_activities,\n activity_name=self.activity_name,\n hide_activity=True)\n post_or_element, stop_pre_or_tag = self.__add_pre_or_and_post_or(action=self.action,\n action_activity_name=self.activity_name,\n task_activities=task_activities)\n branch_counter = 0\n branches = action.findall(\"./branches_Branch\")\n for branch in branches:\n branches_steps = branch.find(\"./branchBehaviour_BranchTransition\").findall(\"./steps_Behaviour\")\n branch_start_actions, final_successor_action = self.__get_final_successor_and_start(actions=branches_steps)\n self.__create_actions_for_all_branches(branches_steps, task_activities)\n # if branch is of type \"ProbabilisticBranchTransition\" it has entity \"branchProbability\":\n # use this to create or post tag\n branch_conditions = branch.findall(\".//branchCondition_GuardedBranchTransition\")\n for condition in branch_conditions:\n condition_types = self.__get_condition_types(condition)\n reference_name = self.__get_reference_name(condition_types=condition_types)\n variable_usage = get_element_by_identifier(attribute=\"referenceName\", search_string=reference_name,\n element_tree=self.xml_cache.get_xml_tree(\"usagemodel\"))\n for branch_start_action in branch_start_actions:\n parent = variable_usage.getparent()\n bool_exp = parent.find(\".//specification_VariableCharacterisation\").get(\"specification\")\n match_object = re.findall(r'\"t\";+\\d\\.\\d*|\"f\";+\\d\\.\\d*', bool_exp)\n # Get branch probability for post element\n branch_uuid, branch_probability = match_object[branch_counter].replace('\\\"', \"#\").split(\";\")\n post_predecessor_activity_name = create_activity_name_from_action(action=branch_start_action,\n uid_string=self.uid_string)\n post_predecessor_activity = SubElement(post_or_element, 'activity')\n post_predecessor_activity.set(\"name\", post_predecessor_activity_name)\n post_predecessor_activity.set(\"prob\", branch_probability)\n branch_counter += 1\n\n # Add precedence for stop actions\n self.__add_stop_action_precedences(final_successor_action, stop_pre_or_tag, uid=self.uid_string)", "def create_builds(self):\n branches = self.search([('use_in_ci', '=', True)])\n branches.create_build()\n return True", "def 
branch(self, *arguments, **kwargs):\n return self.get_output('branch', *arguments, **kwargs)", "def test_branch_true_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = true_value\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException as _:\n log.xmark()\n raise", "def build(_):", "def test_branch_single_value_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = 1\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def task1(self):\n \n pass", "def generate_tasks(self, task):", "def task():", "def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n )\n ))", "def build_task_from_config(config, build_dep, as_root=False):\n if not isinstance(build_dep, dict):\n core = build_dep.get_plugin('enaml.workbench.core')\n cmd = 'exopy.app.dependencies.analyse'\n cont = core.invoke_command(cmd, {'obj': config})\n if cont.errors:\n raise RuntimeError('Failed to analyse dependencies :\\n%s' %\n cont.errors)\n\n cmd = 'exopy.app.dependencies.collect'\n cont = core.invoke_command(cmd, {'kind': 'build',\n 'dependencies': cont.dependencies})\n if cont.errors:\n raise RuntimeError('Failed to collect dependencies :\\n%s' %\n cont.errors)\n build_dep = cont.dependencies\n\n cls = config.pop('task_id')\n\n if as_root:\n return 
RootTask.build_from_config(config, build_dep)\n else:\n task_class = build_dep['exopy.task'][cls]\n return task_class.build_from_config(config, build_dep)", "def build() -> List[asyncio.Task]:", "def task_b():\n print 'Task B is starting task D'\n yield fibra.Async(task_x('D'))\n print 'Task B is finishing'", "def build_a_bear():\n if os.path.isdir(c.BEAR_PREFIX):\n logging.debug(\"skipping Bear installation\")\n return\n\n # download\n if not os.path.isfile(c.BEAR_ARCHIVE):\n curl = get_cmd_or_die(\"curl\")\n curl['-s', c.BEAR_URL, '-o', c.BEAR_ARCHIVE] & pb.TEE\n\n # remove any existing build dir since we don't know if\n # bear was built for the current host environment.\n if os.path.isdir(c.BEAR_SRC):\n shutil.rmtree(c.BEAR_SRC, ignore_errors=True)\n\n # unpack\n tar = get_cmd_or_die(\"tar\")\n with pb.local.cwd(c.DEPS_DIR):\n tar['xf', c.BEAR_ARCHIVE] & pb.TEE\n\n # cmake\n bear_build_dir = os.path.join(c.BEAR_SRC, \"build\")\n bear_install_prefix = \"-DCMAKE_INSTALL_PREFIX=\" + c.BEAR_PREFIX\n ensure_dir(bear_build_dir)\n with pb.local.cwd(bear_build_dir):\n cmake = get_cmd_or_die(\"cmake\")\n cmake[\"..\", bear_install_prefix] & pb.TEE\n make = get_cmd_or_die(\"make\")\n make[\"install\"] & pb.TEE" ]
[ "0.6055547", "0.5845551", "0.58151907", "0.5802773", "0.57201207", "0.5653951", "0.5646536", "0.5626943", "0.55422276", "0.5541642", "0.55228865", "0.55066115", "0.5382394", "0.5382394", "0.53815293", "0.5371971", "0.5292997", "0.5239128", "0.5233028", "0.5226324", "0.52124345", "0.52098167", "0.5187919", "0.51861334", "0.5154046", "0.51504034", "0.5147863", "0.514771", "0.51303285", "0.5120543" ]
0.6202664
0
Returns a set of slow weights.
def get_slow_weights(self): var_list = tf.trainable_variables() var_list = list(filter(lambda x: 'phi' in x.name, var_list)) layers = self.config.transfer_config.meta_layers if layers == "all": pass elif layers == "4": keywords = ['TaskB', 'unit_4_'] filter_fn = lambda x: any([kw in x.name for kw in keywords]) var_list = list(filter(filter_fn, var_list)) else: raise ValueError('Unknown finetune layers {}'.format(layers)) [log.info('Slow weights {}'.format(v.name)) for v in var_list] return var_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weights(self):\n return []", "def get_weights(self):", "def get_weights(self):\n return [self.w, self.b]", "def weights(self) -> List[float]:", "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def return_weights(self):\n w0 = self.comparator.weight.data.numpy()\n b0 = self.comparator.bias.data.numpy()\n\n w1 = self.matcher.weight.data.numpy()\n b1 = self.matcher.bias.data.numpy()\n\n w2 = self.head.weight.data.numpy()\n b2 = self.head.bias.data.numpy()\n\n return w0, b0, w1, b1, w2, b2", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n return self.__weights", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]", "def get_weights(self):\n return [self.W]", "def get_weights(self):\n return [self.W]", "def get_weights(self):\n return self._weights", "def get_weights(self):\n return self._weights", "def get_weights(self):\n return self._weights", "def get_weights(self):\r\n return self.weights", "def get_weights(self):\r\n return self.weights", "def get_weights(self):\n return self.weights", "def get_weights(self):\n return self.weights", "def get_weights(self):\n return self._weight", "def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)", "def get_weights(self, index):\n weight = self._weights[index]\n if isinstance(weight, dict):\n return list(weight.items())\n else:\n return [(Term(\"t\"), weight)]", "def get_weight_list(self) -> List[float]:\n return self._weight_list", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def get_weights(self):\n # First part is iterating over hidden weights. 
Then append the output weight.\n return [self.hidden_layers[i].get_weight() for i in range(self.depth)] + \\\n [self.output_weight.cpu().detach().numpy()]" ]
[ "0.6849988", "0.683276", "0.6728212", "0.67091906", "0.66474336", "0.6596948", "0.658838", "0.658838", "0.658838", "0.65582275", "0.6556428", "0.6556385", "0.6556385", "0.65001744", "0.65001744", "0.65001744", "0.64975744", "0.64975744", "0.64801264", "0.64801264", "0.6453062", "0.6445901", "0.64333427", "0.63969153", "0.63859737", "0.63609314", "0.63609314", "0.63609314", "0.63609314", "0.6342199" ]
0.7462858
0
Evaluate one step on task A.
def eval_step_a(self, sess, task_a_data): x_a, y_a = task_a_data fdict = self.get_fdict(task_a_data=task_a_data) prediction_a, y_a = sess.run([self.prediction_a_all, self.labels_all], feed_dict=fdict) return prediction_a, y_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_step_a(self, sess, task_a_data):\n x_a, y_a = task_a_data\n fdict = self.get_fdict(task_a_data=task_a_data)\n cost_a, _ = sess.run([self.cost_a, self.train_op_a], feed_dict=fdict)\n return cost_a", "def step_A(self, *args, **kwargs):\n # note - this could be done with a direct db query instead to avoid a loop, for a\n # unit test it's fine though\n if any(obj for obj in self.quester.contents if obj.tags.has(\"QuestA\", category=\"quests\")):\n self.quester.msg(\"Completed step A of quest!\")\n self.current_step = \"B\"\n self.progress()", "def eval_step_b(self, sess, task_b_data):\n raise NotImplemented()", "def task1(self):\n \n pass", "def train_step(self, sess, task_a_data, task_b_data):\r\n fdict = self._prerun(sess, None, task_b_data)\r\n sess.run(self._update_grads_b, feed_dict=fdict)\r\n train_op = self.train_op_b\r\n\r\n cost_b_v, _ = sess.run([self.cost_b_v, train_op],\r\n feed_dict=fdict)\r\n return None, None, cost_b_v", "def task_a():\n print 'Task A is starting task C'\n yield task_x('C')\n print 'Task A is finishing'", "def run(inputs):\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)", "def main():\n A = np.array([\n [40, 36],\n [36, 45]\n ])\n b = np.array([-64, -66])\n c = 27\n solve(Task1TargetFunction(A, b, c))", "def run_one_step(self):\n pass", "def train_step(self, sess, task_a_data):\n raise NotImplemented()", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def step(self, s, a):\n raise NotImplementedError", "def execute_action(self, a):\n return self.emulator.next(a)", "def task_stagnant(task):", "def run_single_task(model: api_model.Model, this_task, task_name):\n\n results_data = this_task.evaluate_model(model)\n task_info = this_task.get_task_details()\n\n assert isinstance(task_info, task.TaskMetadata), \\\n f'{task_name}: get_task_details() should return a TaskMetadata object'\n\n if isinstance(results_data, list):\n for k in results_data:\n assert isinstance(\n k, task.ScoreData\n ), f'{task_name}: evaluate_model() should return ScoreData object(s).' 
\n else:\n assert isinstance(\n results_data,\n task.ScoreData), f'{task_name}: evaluate_model() should return ScoreData object(s).'\n\n verify_keywords(task_info, task_name)", "def task_b():\n print 'Task B is starting task D'\n yield fibra.Async(task_x('D'))\n print 'Task B is finishing'", "def task2_3():", "def task():", "def run_task(self) -> Task:", "def step(self):\n if self.index >= len(self.tasklist):\n raise TaskError(\"step(): sequential compound task %s finished\" % self)\n\n self.check_state()\n\n # Select next task from the set and advance the index\n self.task = self.tasklist[self.index]\n self.index += 1\n\n return self.runTask(self.task)", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def _stage1(self):\n self.start_progress()\n tasks = list(self._chain_dict(self._model.adjust_tasks))\n if len(tasks) == 0:\n self._stage2(self._no_adjustments_case())\n else:\n task = lambda : self._run_adjust_tasks(tasks)\n locator.get(\"pool\").submit(task, self._stage2)", "def eval(self, A):\n\t\tpass", "def task():\n pass", "def task():\n pass", "def run_operation(task):\n return task.run()", "def do_t(self, arg):\n self.do_task(arg)", "def eval_step(self, *args, **kwargs):\n raise NotImplementedError", "def do_step(self) -> None:", "def task5(self):\n\n pass" ]
[ "0.65628225", "0.6294123", "0.6179907", "0.6160043", "0.61450297", "0.6023741", "0.59158623", "0.5862764", "0.5809173", "0.5795603", "0.5762719", "0.57541555", "0.5721735", "0.5702014", "0.5693075", "0.5686475", "0.56840885", "0.5677659", "0.5667791", "0.56597733", "0.5618881", "0.56152546", "0.55998486", "0.55788106", "0.55788106", "0.55616796", "0.55572915", "0.5552687", "0.55068964", "0.5481958" ]
0.68857276
0
Evaluate one step on task B.
def eval_step_b(self, sess, task_b_data): raise NotImplemented()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_step(self, sess, task_a_data, task_b_data):\r\n fdict = self._prerun(sess, None, task_b_data)\r\n sess.run(self._update_grads_b, feed_dict=fdict)\r\n train_op = self.train_op_b\r\n\r\n cost_b_v, _ = sess.run([self.cost_b_v, train_op],\r\n feed_dict=fdict)\r\n return None, None, cost_b_v", "def eval_step_b_old_and_new(self, sess, task_b_data):\n raise NotImplemented()", "def task_b():\n print 'Task B is starting task D'\n yield fibra.Async(task_x('D'))\n print 'Task B is finishing'", "def eval_step_b(self, sess, task_b_data):\r\n fdict = self._prerun(sess, None, task_b_data)\r\n prediction_b, y_b = sess.run([self.prediction_b_all, self.labels_b_v_all],\r\n feed_dict=fdict)\r\n\r\n return prediction_b, y_b", "def eval_step_a(self, sess, task_a_data):\n x_a, y_a = task_a_data\n fdict = self.get_fdict(task_a_data=task_a_data)\n prediction_a, y_a = sess.run([self.prediction_a_all, self.labels_all],\n feed_dict=fdict)\n return prediction_a, y_a", "def task1(self):\n \n pass", "def eval_step_b_custom_fetch(self, sess, fetches, task_b_data):\r\n fdict = self._prerun(sess, None, task_b_data)\r\n _ = self.solve_b(\r\n sess, task_b_data.x_train, task_b_data.y_train, fdict=fdict)\r\n return sess.run(fetches, feed_dict=fdict)", "def train_step_a(self, sess, task_a_data):\n x_a, y_a = task_a_data\n fdict = self.get_fdict(task_a_data=task_a_data)\n cost_a, _ = sess.run([self.cost_a, self.train_op_a], feed_dict=fdict)\n return cost_a", "def task_a():\n print 'Task A is starting task C'\n yield task_x('C')\n print 'Task A is finishing'", "def step_B(self, *args, **kwargs):\n if kwargs.get(\"complete_quest_B\", False):\n self.quester.msg(\"Completed step B of quest!\")\n self.quester.db.test_quest_counter = 0\n self.current_step = \"C\"\n self.progress()", "def step_A(self, *args, **kwargs):\n # note - this could be done with a direct db query instead to avoid a loop, for a\n # unit test it's fine though\n if any(obj for obj in self.quester.contents if obj.tags.has(\"QuestA\", category=\"quests\")):\n self.quester.msg(\"Completed step A of quest!\")\n self.current_step = \"B\"\n self.progress()", "def main():\n A = np.array([\n [40, 36],\n [36, 45]\n ])\n b = np.array([-64, -66])\n c = 27\n solve(Task1TargetFunction(A, b, c))", "def task2(self):\n\n pass", "def task2_3():", "def _prerun(self, sess, task_a_data, task_b_data):\r\n fdict = self.get_fdict(task_a_data=task_a_data, task_b_data=task_b_data)\r\n return fdict", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def run(inputs):\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)", "def evaluate(self, states, actions, tasks):\n self._assert_is_batched(states, actions, tasks)\n return self._tf_call(self._evaluate, states, actions, tasks)", "def run_task(self) -> Task:", "def eval_curve_b(self, sess, task_b_data):\r\n fdict = self._prerun(sess, None, 
task_b_data)\r\n cost_b, acc_b, acc_b_v = self.monitor_b(\r\n sess,\r\n task_b_data.x_train,\r\n task_b_data.y_train,\r\n task_b_data.x_test,\r\n task_b_data.y_test,\r\n fdict=fdict)\r\n return cost_b, acc_b, acc_b_v", "def run_operation(task):\n return task.run()", "def _stage1(self):\n self.start_progress()\n tasks = list(self._chain_dict(self._model.adjust_tasks))\n if len(tasks) == 0:\n self._stage2(self._no_adjustments_case())\n else:\n task = lambda : self._run_adjust_tasks(tasks)\n locator.get(\"pool\").submit(task, self._stage2)", "def task():", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def predict_taskAB(model, samples: List[Dict], tokenizer=None, step_size: int=32, label_tags: Dict=POLARITY_INV, verbose=False):\n print(\"[preds]: predicting on task A+B ...\")\n #model.freeze()\n predicted = [] # List[Dict] for output\n if verbose: \n print(\"sample_size:\", len(samples))\n print(samples[0])\n\n # pre-process data\n dataA_elems = _read_data_taskA(tokenizer=tokenizer, test=True, test_samples=samples, bert=True)\n #print(\"read_data_size:\", len(dataA_elems))\n\n for step in range(0,len(samples), step_size):\n # test step_size samples at a time\n if step+step_size <= len(samples):\n step_batch_A = dataA_elems[step:step+step_size]\n else:\n step_batch_A = dataA_elems[step:]\n\n if verbose: \n #print(\"step-A:\", step_batch_A)\n print(\"batch_size:\", len(step_batch_A))\n\n # use collate_fn to input step_size samples into the model\n x_A, _, _, tokens = raw2_collate_fn(step_batch_A)\n if verbose:\n print(\"sample_size:\", len(samples))\n #print(\"X-A:\", x_A)\n with torch.no_grad():\n # predict with modelAB\n for i in range(len(x_A)):\n out_A = model.A_model(x_A[i])\n\n logits_A = out_A.logits \n pred_tokens = torch.argmax(logits_A, -1)\n #print(pred_tokens)\n pred_terms, _ = get_preds_terms(pred_tokens, tokens[i], roberta=True)\n\n\n # build (term,aspect) couples to produce correct output for the metrics\n preds = []\n if verbose:\n print(\"\\npred terms:\", pred_terms)\n\n for j in pred_terms:\n # for each predicted term build a couple\n out_B = model.B_model([[x_A[i],j]])\n logits_B = out_B.logits \n pred_sents = torch.argmax(logits_B, -1)\n \n preds.append((j,label_tags[int(pred_sents)]))\n if verbose: print(\"[LOFFA]:\", preds)\n\n if verbose: print(\"[CACCA]:\", preds)\n predicted.append({\"targets\":preds})\n preds = []\n\n print(\"Num predictions:\", len(predicted))\n return predicted", "def build_task_a(self, x, y, is_training, ext_wts=None):\n config = self.config\n global_step = self.global_step\n if config.backbone_class == 'resnet_backbone':\n bb_config = config.resnet_config\n else:\n assert False, 'Not supported'\n proto_config = config.protonet_config\n opt_config = config.optimizer_config\n num_classes_a = self._num_classes_a\n\n # Classification branch for task A.\n h_a = self._run_backbone(x, is_training=is_training, ext_wts=ext_wts)\n self._h_a = h_a\n h_shape = h_a.get_shape()\n h_size = 1\n for ss in h_shape[1:]:\n h_size *= int(ss)\n self._h_size = h_size\n\n if ext_wts is not None:\n w_class_a = weight_variable(\n [h_size, num_classes_a],\n init_method='numpy',\n dtype=self.dtype,\n init_param={'val': np.transpose(ext_wts['w_class_a'])},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([],\n init_method='numpy',\n dtype=self.dtype,\n init_param={'val': ext_wts['b_class_a']},\n wd=0e0,\n name='b_class_a')\n else:\n w_class_a = weight_variable([h_size, num_classes_a],\n 
init_method='truncated_normal',\n dtype=self.dtype,\n init_param={'stddev': 0.01},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([num_classes_a],\n dtype=self.dtype,\n init_method='constant',\n init_param={'val': 0.0},\n name='b_class_a')\n self._w_class_a = w_class_a\n self._b_class_a = b_class_a\n num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)\n num_classes_a_dyn32 = tf.shape(b_class_a)[0]\n\n if proto_config.cosine_a:\n if proto_config.cosine_tau:\n if ext_wts is None:\n tau_init_val = 10.0\n else:\n tau_init_val = ext_wts['tau'][0]\n tau = weight_variable([],\n dtype=self.dtype,\n init_method='constant',\n init_param={'val': tau_init_val},\n name='tau')\n else:\n tau = tf.constant(1.0)\n\n w_class_a_norm = self._normalize(w_class_a, axis=0)\n h_a_norm = self._normalize(h_a, axis=1)\n dot = tf.matmul(h_a_norm, w_class_a_norm)\n if ext_wts is not None:\n dot += b_class_a\n logits_a = tau * dot\n else:\n logits_a = tf.matmul(h_a, w_class_a) + b_class_a\n\n self._prediction_a = logits_a\n self._prediction_a_all = self._prediction_a\n y_dense = tf.one_hot(y, num_classes_a)\n xent_a = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits_a, labels=y_dense)\n xent_a = tf.reduce_mean(xent_a, name='xent')\n cost_a = xent_a\n self._cost_a = cost_a\n cost_a += self._decay()\n self._prediction_a = logits_a\n return logits_a", "def task5(self):\n\n pass", "def task_stagnant(task):", "def monitor_b(self, sess, x_b_np, y_b_np, x_b_v_np, y_b_v_np, fdict=None):\r\n tconfig = self.config.transfer_config\r\n steps = tconfig.ft_optimizer_config.max_train_steps\r\n batch_size = tconfig.ft_optimizer_config.batch_size\r\n rnd = np.random.RandomState(0)\r\n # Re-initialize the fast weights.\r\n self.reset_b(sess)\r\n if fdict is None:\r\n fdict = {}\r\n if batch_size == -1:\r\n fdict[self.inputs_b] = x_b_np\r\n fdict[self.labels_b] = y_b_np\r\n fdict[self.inputs_b_v] = x_b_v_np\r\n fdict[self.labels_b_v] = y_b_v_np\r\n\r\n cost_b_list = np.zeros([steps])\r\n acc_b_list = np.zeros([steps])\r\n acc_b_v_list = np.zeros([steps])\r\n\r\n # Run 1st order.\r\n if tconfig.ft_optimizer_config.optimizer in ['adam', 'sgd', 'mom']:\r\n it = six.moves.xrange(steps)\r\n it = tqdm(it, ncols=0, desc='solve b')\r\n cost_b = 0.0\r\n for num in it:\r\n if batch_size == -1:\r\n # Use full batch size.\r\n x_, y_ = x_b_np, y_b_np\r\n else:\r\n # Use mini-batch.\r\n assert False\r\n x_, y_ = self.minibatch(x_b_np, y_b_np, batch_size, rnd=rnd)\r\n fdict[self.inputs_b] = x_\r\n fdict[self.labels_b] = y_\r\n cost_b, acc_b_tr, acc_b_v, _ = sess.run(\r\n [self.cost_b, self.acc_b_tr, self.acc_b_v, self._train_op_ft],\r\n feed_dict=fdict)\r\n cost_b_list[num] = cost_b\r\n acc_b_list[num] = acc_b_tr\r\n acc_b_v_list[num] = acc_b_v\r\n it.set_postfix(\r\n cost_b='{:.3e}'.format(cost_b),\r\n acc_b_tr='{:.3f}'.format(acc_b_tr * 100.0),\r\n acc_b_v='{:.3f}'.format(acc_b_v * 100.0))\r\n # Run 2nd order after initial burn in.\r\n elif tconfig.ft_optimizer_config.optimizer in ['lbfgs']:\r\n # Let's use first order optimizers for now.\r\n assert False, 'Not supported.'\r\n return cost_b_list, acc_b_list, acc_b_v_list", "def train_step(self, sess, task_a_data):\n raise NotImplemented()" ]
[ "0.71829134", "0.6771176", "0.6767714", "0.6698021", "0.63905007", "0.63236654", "0.62424046", "0.6112074", "0.60616606", "0.59855074", "0.59512186", "0.5926077", "0.5890276", "0.5878687", "0.58514184", "0.58282244", "0.58180755", "0.5784487", "0.57340306", "0.56864625", "0.56834805", "0.56725454", "0.5666419", "0.56526023", "0.564347", "0.5601609", "0.55697936", "0.55648506", "0.5494206", "0.54727364" ]
0.73499215
0
Searches for gitignore file in current and parent directories
def get_gitignore(path): if '.gitignore' in os.listdir(path): return parse_gitignore(os.path.join(path, '.gitignore')) else: full_path = os.path.abspath(path) if full_path == '/': return return get_gitignore(os.path.dirname(full_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gitignored(self, path):\n if path.startswith(self.options.target_repo.location):\n repo_prefix_len = len(self.options.target_repo.location) + 1\n path = path[repo_prefix_len:]\n return self.gitignore.match_file(path)", "def gitignore(self):\n patterns = []\n for path in ('.gitignore', '.git/info/exclude'):\n try:\n with open(pjoin(self.options.target_repo.location, path)) as f:\n patterns.extend(f)\n except FileNotFoundError:\n pass\n except IOError as e:\n logger.warning(f'failed reading {path!r}: {e}')\n return PathSpec.from_lines('gitwildmatch', patterns)", "def read_gitignore():\n excludes = []\n gitignore = Path(\".gitignore\")\n if gitignore.exists():\n with gitignore.open() as f:\n excludes += f.read().split(\"\\n\")\n else:\n raise ValueError(\n \"No exclude configuration option and no .gitignore file present\"\n )\n return excludes", "def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in (self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = [path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths", "def _remove_gitignore_files(self, log_prompt: str) -> None:\n try:\n repo = git.Repo(self._content_repo)\n files_to_ignore = repo.ignored(self._facts[\"lint_files\"])\n for file in files_to_ignore:\n logger.info(f\"{log_prompt} - Skipping gitignore file {file}\")\n self._facts[\"lint_files\"] = [\n path\n for path in self._facts[\"lint_files\"]\n if path not in files_to_ignore\n ]\n\n except (git.InvalidGitRepositoryError, git.NoSuchPathError):\n logger.debug(\"No gitignore files is available\")", "def find_repo_root():\n path = os.getcwd()\n\n while \".git\" not in set(os.listdir(path)) and path != \"/\":\n path = os.path.dirname(path)\n\n if path == \"/\":\n raise Exception(\"No repo found, stopping at /\")\n\n return path", "def create_gitignore(child_dir: 'str') -> 'None': \n \n index = THIS_DIR.rfind(child_dir)\n parent_dir = THIS_DIR[:index]\n\n # list of files in directory\n 
gitignore_files = listdir(THIS_DIR)\n \n if not gitignore_files:\n return None\n \n text = ''\n store = []\n \n for file in gitignore_files:\n file_path = f'{THIS_DIR}/{file}'\n if file_path.endswith(EXT) == True:\n with open(file_path, 'r') as f: \n temp = f.read()\n text = text + temp + '\\n'\n store.append({'file': file})\n \n if len(text) == '':\n return None\n \n with open(f'{parent_dir}/.gitignore', 'w') as f:\n # delete file contents\n f.truncate(0)\n # write text to the new file\n f.write(text)\n \n print({\n 'directory': parent_dir,\n 'consolidated': store})\n return None", "def _find_git_files(dirname='', git_dir=None):\n file_list = []\n if git_dir is None:\n git_dir = pbr.git._run_git_functions()\n if git_dir:\n file_list = pbr.git._run_git_command(['ls-files', '-z'], git_dir)\n file_list += pbr.git._run_git_command(\n ['submodule', 'foreach', '--quiet', 'ls-files', '-z'],\n git_dir\n )\n # Users can fix utf8 issues locally with a single commit, so we are\n # strict here.\n file_list = file_list.split(b'\\x00'.decode('utf-8'))\n submodules = _get_submodules(git_dir)\n return [f for f in file_list if f and f not in submodules]", "def get_files(path):\n\n # In case path is singular file:\n if os.path.isfile(path):\n return [path]\n\n all_files = []\n\n # Look for gitignore upstream\n gilist = get_gitignore(path)\n\n # In case path is directory:\n\n # In case no gitignore was found in current directory or up\n if not gilist:\n for root, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if d[0] != '.']\n\n # Constantly check for gitignore while walking\n if '.gitignore' in os.listdir(root):\n all_files.extend(get_files(root))\n dirs[:] = []\n files[:] = []\n\n for name in files:\n if not name.startswith('.'):\n all_files.append(os.path.join(root, name))\n\n # In case gitignore was found\n if gilist:\n for root, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if d[0] != '.' 
and d not in gilist]\n\n # If root dir is in gitignore break and go to next directory\n for item in gilist:\n if fnmatch.fnmatch(root, item):\n dirs[:] = []\n break\n\n else:\n # If file is gitignore material break and go to next file\n for name in files:\n for item in gilist:\n if fnmatch.fnmatch(name, item) or item.endswith(name):\n break\n\n else:\n # Finally append the file if it passed all tests\n if not name.startswith('.') and name.endswith(EXTES):\n all_files.append(os.path.join(root, name))\n return all_files", "def _search_parent_dir(file_name):\n\n current_dir = os.getcwd()\n parent_dir = os.path.dirname(current_dir)\n while current_dir != parent_dir:\n if not os.path.splitdrive(current_dir)[-1]:\n return False\n file_list = os.listdir(current_dir)\n parent_dir = os.path.dirname(current_dir)\n\n if file_name in file_list:\n return current_dir\n\n else:\n current_dir = parent_dir\n return False", "def __find_config_file(current_dir):\n path = current_dir\n found = None\n\n while True:\n _try = os.path.join(path, CONFIG_FILE_NAME)\n if os.path.isfile(_try):\n found = _try\n break\n if os.path.dirname(path) == path:\n break\n path = os.path.dirname(path)\n return found", "def gitIgnoreContent( self, pars, directory ):\n\n name = pars['name']\n\n return f\"\"\"\\\n{name}-*.pkg*\nsrc/\npkg/\n\"\"\"", "def findRepositoryByBackTracking():\n \n cLookBack = '.'\n while(True):\n cDir = os.path.abspath(cLookBack)\n print(\"Searching in %s\" % cDir)\n if os.path.isdir( os.path.join(cDir, DB_SUBFOLDER) ):\n return cDir\n else:\n if os.path.abspath(cLookBack) == os.path.abspath(cLookBack + '/..'):\n return os.path.abspath('.')\n cLookBack = cLookBack + '/..'\n \n return cDir", "def find_git_repository(self, path):\n while path is not None:\n git_path = os.path.join(path,'.git')\n if os.path.exists(git_path) and os.path.isdir(git_path):\n return path\n path = os.path.dirname(path)\n return None", "def ignore_from_repo(self, directory, ignore):\n for filename in os.listdir(directory):\n if not filename.endswith('.rpm'):\n continue\n _, basename = filename.split('-', 1)\n ignore.add(basename[:-4])", "def _is_inside_ignored_dir(filename):\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])", "def _get_pyfilelist(srcpath, usegitignore=True) -> list:\n gitignorefile = srcpath / Path(\".gitignore\")\n if usegitignore and gitignorefile.exists():\n with gitignorefile.open('r') as f:\n lines = f.read().splitlines()\n gitignore = [\n srcpath / Path(line)\n for line in lines\n if not line.strip().startswith(\"#\")\n and len(line.strip()) > 1\n and Path(line).suffix == \"\"\n ] + [srcpath / Path(\".git\")]\n viablepaths = [\n p for p in srcpath.glob(\"*/\") if p.is_dir() and p not in gitignore\n ]\n filelist = set().union(*[set(p.glob(\"**/*.py\")) for p in viablepaths])\n filelist = filelist.union(*[set(srcpath.glob('*.py'))])\n else:\n filelist = srcpath.glob(\"**/*.py\")\n return [p.relative_to(srcpath) for p in filelist]", "def _find_repo() -> str:\n\tstart = os.path.abspath(os.getcwd())\n\tcurrent = start\n\twhile current != \"/\":\n\t\trepo = os.path.join(current, \".repo\")\n\t\tif os.path.exists(repo):\n\t\t\tLOGGER.debug(\"Found .repo at %s\", repo)\n\t\t\treturn repo\n\t\tcurrent = os.path.dirname(current)\n\traise RepoNotFoundError(\"Not .repo found in any directory along {}\".format(start))", "def _findconfigfile():\n\n # A ordered list of possible config files\n configfiles = [\"~/.githubhooksrc\",\n \"/etc/githubhooks\"]\n\n for 
configfile in configfiles:\n if os.path.isfile(os.path.expanduser(configfile)):\n return os.path.expanduser(configfile)\n\n # No valid config file found\n print \"ERROR: No valid config file found in any of the following locations:\"\n for configfile in configfiles:\n print \" - %s\" % configfile\n sys.exit(1)", "def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))", "def check_run_in_root(path):\n candidate = pl.Path.cwd() / path\n for _ in candidate.glob(pattern='.gitattributes'):\n return\n for _ in candidate.glob(pattern='.svn'):\n return\n raise ValueError(f'{candidate} does not appear to be a git or svn root')", "def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def resolve_prettier_ignore_path(source_file_dir, st_project_path):\n ignore_file_path = in_source_file_path_or_project_root(source_file_dir, st_project_path, PRETTIER_IGNORE_FILE)\n if ignore_file_path is not None:\n return ignore_file_path\n\n dirs = generate_dirs(source_file_dir, limit=500)\n for d in dirs:\n target = os.path.join(d, PRETTIER_IGNORE_FILE)\n if os.path.exists(target):\n return target\n\n alt_dirs = ['~']\n for d in alt_dirs:\n d = os.path.expanduser(d)\n target = os.path.join(d, PRETTIER_IGNORE_FILE)\n if os.path.exists(target):\n return target\n\n return None", "def default_ignore(location):\n ignore = '\\n'.join(DEFAULT_IGNORE)\n with utils.cd(location):\n with open('.gitignore', 'w+') as f:\n f.write(ignore)", "def _locate_config_file(search_dirs):\n tried = []\n for base in search_dirs:\n for path in [os.path.join(base, CONFIG_NAME + suffix) for suffix in \".yml\", \".json\"]:\n log.debug(\"searching for config file: %s\", path)\n tried.append(path)\n if os.path.isfile(path):\n log.info(\"using config file: %s\", path)\n return path\n raise ConfigError(\"no config file found in: %s\" % \", \".join(tried))", "def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def find_in_parent_dir(fname):\n p = os.path.abspath(os.path.curdir)\n \n while not os.path.exists(os.path.join(p, project_conf_name)):\n oldp, p = p, os.path.dirname(p)\n if 
p == oldp:\n return None\n \n return open(os.path.join(p, project_conf_name), 'r')", "def _populate_gitignore_items(self):\n\n # Reset the include_regexps and exclude_regexps.\n self.include_regexps = []\n self.exclude_regexps = [re.compile('.*\\.git/.*\\.lock'),\n re.compile('.*\\.baboon-timestamp'),\n re.compile('.*baboon.*')]\n\n # If there's a .gitignore file in the watched directory.\n if os.path.exists(self.gitignore_path):\n # Parse the gitignore.\n ignores = self._parse_gitignore()\n if ignores is not None:\n # Populate the regexps list with the ignores result.\n self.include_regexps += [re.compile(x) for x in ignores[0]]\n self.exclude_regexps += [re.compile(x) for x in ignores[1]]", "def FindCheckerFiles(path):\n if not path:\n Logger.fail(\"No source path provided\")\n elif os.path.isfile(path):\n return [ path ]\n elif os.path.isdir(path):\n foundFiles = []\n for root, dirs, files in os.walk(path):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension in [\".java\", \".smali\"]:\n foundFiles.append(os.path.join(root, file))\n return foundFiles\n else:\n Logger.fail(\"Source path \\\"\" + path + \"\\\" not found\")", "def find_git_dir(directory):\n directory = os.path.abspath(directory)\n if not os.path.exists(directory):\n return \"\"\n\n for _ in range(10):\n path = os.path.join(directory, \".git\")\n if os.path.exists(path):\n return directory\n\n if directory == \"/\":\n return \"\"\n\n directory = os.path.abspath(os.path.join(directory, os.pardir))\n\n return \"\"" ]
[ "0.7278777", "0.6781642", "0.64230764", "0.636225", "0.6207674", "0.6181297", "0.61577624", "0.61192256", "0.6116596", "0.61151797", "0.6023385", "0.6021016", "0.60138816", "0.5991891", "0.59749573", "0.5922214", "0.590698", "0.58640575", "0.5849364", "0.58041465", "0.57615376", "0.57551533", "0.5706413", "0.5663871", "0.56388307", "0.5629212", "0.5623259", "0.5618998", "0.56142163", "0.56009865" ]
0.6787644
1
Returns a list with gitignore's content
def parse_gitignore(gipath): gitignore_file = open(os.path.abspath(gipath), 'r') gilist = [] for row in gitignore_file.readlines(): if not row.startswith('#') and row != '\n': if row.endswith('/\n'): gilist.append(row[:-2]) else: gilist.append(row[:-1]) gitignore_file.close() return gilist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_gitignore():\n excludes = []\n gitignore = Path(\".gitignore\")\n if gitignore.exists():\n with gitignore.open() as f:\n excludes += f.read().split(\"\\n\")\n else:\n raise ValueError(\n \"No exclude configuration option and no .gitignore file present\"\n )\n return excludes", "def _parse_gitignore(self):\n gitignore_path = os.path.join(self.project_path, '.gitignore')\n lines = [] # contains each line of the .gitignore file\n results = [] # contains the result regexp patterns\n neg_results = [] # contains the result negative regexp patterns\n\n try:\n with open(gitignore_path, 'r') as f:\n lines = f.readlines()\n except IOError as err:\n raise BaboonException(format(err))\n\n # Sort the line in order to have inverse pattern first\n lines = sorted(lines, key=cmp_to_key(self._gitline_comparator))\n\n # For each git pattern, convert it to regexp pattern\n for line in lines:\n regexp = self._gitline_to_regexp(line)\n if regexp is not None:\n if not line.startswith('!'):\n results.append(regexp)\n else:\n neg_results.append(regexp)\n\n return neg_results, results", "def _parse_gitignore(self):\n gitignore_path = os.path.join(self.config.path, '.gitignore')\n lines = [] # contains each line of the .gitignore file\n results = [] # contains the result regexp patterns\n neg_results = [] # contains the result negative regexp patterns\n\n with open(gitignore_path, 'r') as f:\n lines = f.readlines()\n\n # Sort the line in order to have inverse pattern first\n lines.sort(self._gitline_comparator)\n\n # For each git pattern, convert it to regexp pattern\n for line in lines:\n regexp = self._gitline_to_regexp(line)\n if regexp is not None:\n if not line.startswith('!'):\n results.append(regexp)\n else:\n neg_results.append(regexp)\n\n return neg_results, results", "def _get_ignore_list(self) -> List[str]:\n if not self.exists():\n return []\n if self._file_exists():\n with open(self._path, \"r\", encoding=DefaultOpenEncoding.READ) as fh:\n return [line.rstrip() for line in fh if line]\n return []", "def git_ls_files():\n\tproc = subprocess.Popen(\n\t\t['git', 'ls-files'],\n\t\tstdin=subprocess.DEVNULL,\n\t\tstdout=subprocess.PIPE,\n\t\tstderr=None\n\t)\n\t(stdout, stderr) = proc.communicate()\n\tif proc.returncode != 0:\n\t\traise OSError(\"Cannot list version-controlled files\")\n\tfilenames = stdout.decode().split()\n\treturn list(filter(is_regular_file, filenames))", "def _get_pyfilelist(srcpath, usegitignore=True) -> list:\n gitignorefile = srcpath / Path(\".gitignore\")\n if usegitignore and gitignorefile.exists():\n with gitignorefile.open('r') as f:\n lines = f.read().splitlines()\n gitignore = [\n srcpath / Path(line)\n for line in lines\n if not line.strip().startswith(\"#\")\n and len(line.strip()) > 1\n and Path(line).suffix == \"\"\n ] + [srcpath / Path(\".git\")]\n viablepaths = [\n p for p in srcpath.glob(\"*/\") if p.is_dir() and p not in gitignore\n ]\n filelist = set().union(*[set(p.glob(\"**/*.py\")) for p in viablepaths])\n filelist = filelist.union(*[set(srcpath.glob('*.py'))])\n else:\n filelist = srcpath.glob(\"**/*.py\")\n return [p.relative_to(srcpath) for p in filelist]", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return 
lst", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n files_list.append(output[a].replace('\\n', ''))\n return files_list", "def retrieve_tracked_files(self):\n result = []\n\n for key in self.repo.index.entries.keys():\n\n result.append(os.path.join(self.repo.working_dir, key[0]))\n\n return result", "def contents(self):\n entries = []\n walk = next(os.walk(self.path))\n entries.extend(LocalFolder(os.path.join(walk[0], f)) for f in walk[1])\n entries.extend(LocalFile(os.path.join(walk[0], f)) for f in walk[2])\n return entries", "def getcontent(self):\n filelist=[]\n if len(self.filelist) == 0:\n return \"empty directory\"\n else:\n for file in self.filelist:\n filelist.append(file)\n return filelist", "def gitIgnoreContent( self, pars, directory ):\n\n name = pars['name']\n\n return f\"\"\"\\\n{name}-*.pkg*\nsrc/\npkg/\n\"\"\"", "def get_list_of_comitted_files():\n files = []\n output = []\n try:\n output = subprocess.check_output(['git','diff-index', '--name-status', '--cached','HEAD']\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError:\n print(\"Error diff files get: trace %s\" % subprocess.CalledProcessError)\n return files\n\n for result in output.split(\"\\n\"):\n logging.info(result)\n if result != '':\n match = modified.match(result)\n if match:\n files.append(match.group('name'))\n\n return files", "def getContentFiles():\n contentFiles = []\n for contentDir, subDirs, filenames in os.walk(sourceDir, followlinks=True):\n if shouldIgnore(contentDir):\n subDirs[:] = []\n continue\n for filename in filenames:\n if not shouldIgnore(filename):\n cf = ContentFile(os.path.join(contentDir, filename))\n log(`cf.path`)\n contentFiles.append(cf)\n return contentFiles", "def get_content_directories() -> List[str]:\n result:list[str] = []\n for current_path in os.listdir(\"content\"):\n if os.path.isdir(os.path.join(\"content\", current_path)):\n result.append(os.path.join(\"content\", current_path))\n return result", "def gitignore(self):\n patterns = []\n for path in ('.gitignore', '.git/info/exclude'):\n try:\n with open(pjoin(self.options.target_repo.location, path)) as f:\n patterns.extend(f)\n except FileNotFoundError:\n pass\n except IOError as e:\n logger.warning(f'failed reading {path!r}: {e}')\n return PathSpec.from_lines('gitwildmatch', patterns)", "def directory_contents(self, commit, path):\n\n tree = self._get_tree(commit, path)\n return [c[0] for c in tree]", "def contents(self):\n # list_folder on \"/\" isn't supported for some reason.\n path = \"\" if self.path == \"/\" else self.path\n result = execute(pdbox.dbx.files_list_folder, path)\n entries = [get_remote(None, meta=e) for e in result.entries]\n\n # TODO: Verify that this works.\n while result.has_more:\n # As long as there are more pages to look through,\n # add their contents to the list of entries.\n more = execute(pdbox.dbx.files_list_folder_continue, result.cursor)\n entries.extend(get_remote(None, meta=e) for e in more)\n\n return entries", "def _populate_gitignore_items(self):\n\n # Reset the include_regexps and exclude_regexps.\n self.include_regexps = 
[]\n self.exclude_regexps = [re.compile('.*\\.git/.*\\.lock'),\n re.compile('.*\\.baboon-timestamp'),\n re.compile('.*baboon.*')]\n\n # If there's a .gitignore file in the watched directory.\n if os.path.exists(self.gitignore_path):\n # Parse the gitignore.\n ignores = self._parse_gitignore()\n if ignores is not None:\n # Populate the regexps list with the ignores result.\n self.include_regexps += [re.compile(x) for x in ignores[0]]\n self.exclude_regexps += [re.compile(x) for x in ignores[1]]", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename) for filename in filenames if filename.endswith(\".md\")))", "def _parse_hgignore(self):\n hgignore_path = os.path.join(self.config.path, '.hgignore')\n lines = [] # contains each line of the .hgignore file\n results = [] # contains the result regexp patterns\n\n # Mercurial supports several pattern syntaxes. The default\n # syntax used is Python/Perl-style regular expressions.\n syntax = 'regexp'\n\n with open(hgignore_path, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n # Mercurial supports several pattern syntaxes. The default\n # syntax used is Python/Perl-style regular expressions.\n # To change the syntax used, use a line of the following\n # form:\n #\n # syntax: NAME\n #\n # where NAME is one of the following:\n # regexp\n # Regular expression, Python/Perl syntax.\n # glob\n # Shell-style glob.\n new_syntax = self._get_hgignore_syntax(line)\n if new_syntax is not None:\n syntax = new_syntax\n else:\n if syntax == 'regexp':\n results += line\n elif syntax == 'glob':\n results += fnmatch.translate(line)", "def _load_files_from_repository(self) -> typing.List[upload.File]:\n with tempfile.TemporaryDirectory() as tempdir:\n self._execute_command(\n args=['git', 'clone', self.repository_folder, '.'],\n cwd=tempdir,\n )\n to_return = []\n # remove git internal files\n shutil.rmtree(pathlib.Path(tempdir) / '.git')\n for root, _, files in os.walk(tempdir):\n for file in files:\n upload_file = self._load_file(file, root, tempdir)\n to_return.append(upload_file)\n return to_return", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))", "def untracked_files():\n res = run(\n \"cd %s ; git status\" % (SOURCE_ABSOLUTE),\n stdout=PIPE, stderr=PIPE,\n universal_newlines=True,\n shell=True\n )\n result = [line.strip() for line in res.stdout.split(\"\\n\")]\n\n files = [file\n for file in result if (file.endswith(\".txt\")\n and not (file.startswith(\"new file\") or\n file.startswith(\"deleted\") or file.startswith(\"modified\")))]\n\n return files", "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def list_contents(reader: UFOReader) -> list[str]:\n return reader.getImageDirectoryListing() # type: ignore", "def read_file_list(filename):\n\n # hint: when you read lines of files, there will be a \"newline\"\n # (end-of-line character) at the end of each line, and you want to\n # strip that off before you print it. 
Do some research on that!\n\n # with open(filename, 'r') as file:\n # print(file.read())\n #cwd = os.getcwd() # This gets the visual studio code opened location\n cwd = os.path.dirname(os.path.realpath(__file__))\n print(cwd)\n try:\n file_contents = Path(cwd + \"\\\\\" + filename).read_text()\n except:\n return \"File not found\"\n return file_contents", "def directory_readlines(package, filename):\n\n try:\n return open(os.path.join(package, filename), 'rt').readlines()\n except:\n pass\n\n return []", "def contents(filepath):\n f = open(filepath, 'r')\n rval = [x.rstrip(\"\\r\\n\") for x in f.readlines()]\n f.close()\n return rval" ]
[ "0.7136777", "0.68094444", "0.6735772", "0.6703383", "0.656776", "0.6520689", "0.6497992", "0.6473873", "0.64219946", "0.6379378", "0.6348677", "0.63169664", "0.6281277", "0.62797284", "0.62559724", "0.62339705", "0.62146395", "0.6168146", "0.61291796", "0.60304403", "0.602739", "0.6005494", "0.59970623", "0.59970623", "0.5994607", "0.5981926", "0.59740776", "0.59712416", "0.59688914", "0.595952" ]
0.70429176
1
Prints a report at the end of the search
def report(): global COUNTER if COUNTER > 0: print('\n\n') print('Searched {0} files'.format(SEARCHED)) print('Found {0} TODOs in {1} files'.format(COUNTER, F_COUNTER))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report():\n pass", "def test_03_print_result(self, pages):\n pages.search.close_location_pop_up()\n pages.search.print_search_results()", "def fullreport(self):\n print \"\"\n print \"Liten2 Full Reporting\"\n print \"--------------------------------------\"\n for getsize in self.size_searched():\n print \"File Size searched:\\t %s MB\" % self.humanvalue(getsize[0]) \n print \"Total MB wasted:\\t %s MB\" % self.totalmb()\n for i in self.file_num():\n print \"Files found over %s MB:\\t %s\" % (self.humanvalue(getsize[0]), i[0])\n for i in self.total_files():\n print \"Total files searched:\\t %s\" % i[0]\n for dup_count in self.count_dups():\n print \"\"\n print \"Total Duplicate files found:\\t %s\" % dup_count[0]\n print \"--------------------------------------\"\n for paths in self.path_dups():\n print paths[0]", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def print_results(self):\n pass", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def report():\n Robot.report()", "def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')", "def report(self, **options):\n pass", "def report(self, output_dir):", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def report(self):\n for c in self._call_chain:\n print c.title\n print '=' * len(c.title)\n c.report()\n print", "def report_printing(cls):\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n 
print(donor_db.create_report())\n print('Returning to main menu...\\n')\n return", "def print_report(report_data):\n\n header = '\\nPROPERTY SUMMARY FOR \"{}\"\\n'.format(report_data['property_name'])\n print('* ' * (len(header) // 2))\n print(header)\n\n print('Property Type:'.ljust(25), report_data['property_type'])\n print('Number of Bedrooms:'.ljust(25), report_data['rooms'])\n print('Number of Bathrooms:'.ljust(25), report_data['bathrooms'])\n\n not_found = ['n/a'] # Print this if nothing found for category\n\n print('\\nAMENITIES:')\n\n for amenity in report_data['general_amenities']:\n print(' * ', amenity)\n\n print('\\nFAMILY AMENITIES:')\n\n for amenity in report_data['family_amenities'] or not_found:\n print(' * ', amenity)\n\n print('\\nSAFETY FEATURES:')\n\n for amenity in report_data['safety_feats'] or not_found:\n print(' * ', amenity)\n\n print('\\n')\n\n return", "def report(self):\n self.report_status()\n print\n self.report_charset()\n print\n self.report_key()\n print\n self.report_keyset()", "def report(request):\n return render_to_response('application/report.html',\n {'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def reports_cli():", "def _gen_report(self):\n print \"------------------------------------------\"\n print \"fio report\"\n print \"------------------------------------------\"\n print \"name\", \" \".join(f for f in FIELDS)\n # print fields\n for name in sorted(self.reports):\n report = self.reports[name]\n #print report\n print name, \" \".join(str(report.get(f)) for f in FIELDS)\n\n print \"*******************************************\"\n # print clats\n index = 0\n for name in sorted(self.reports):\n report = self.reports[name]\n if index == 0:\n print \"clat_percent\", \" \".join(\n str(c[0]) for c in report[\"clats\"])\n print name, \" \".join(str(c[1]) for c in report[\"clats\"])\n index += 1", "def print_intersection_report(self):\n try:\n filename = ''\n\n if self.data_filename:\n filename = '../output/' + self.data_filename + '.results.txt'\n else:\n filename = '../output/random.results.p' + str(self.parts) + '.n' + str(self.nodes) + '.txt'\n\n if self.verbose:\n print 'Printing Report data to ' + filename\n\n with open(filename, 'wt') as f:\n for report_row in self.intersection_report_data:\n f.write(str(report_row[0]) + '|' + str(report_row[1]) + '|' + str(report_row[2]))\n\n except Exception, e:\n print 'Unexpected error:', str(e)\n print 'Problems writing the data output file.'\n exit()", "def print_results(results):\n print(\"\\033[4m\\033[1m%-75s%s\\033[0m\" % (\"NAME\", \"ADDRESS\"))\n\n for selections in data:\n print(\"%-75s%s\" % (selections['applicant'], selections['location']))\n \n print(\"\\n\\033[1m--- PAGE \", page_num, \"---\\033[0m\\n\")", "def report(self) -> Any:", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, 
res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()", "def display_results():\n pass", "def display_reports(self, layout): # pylint: disable=arguments-differ", "def print_report(stocks_to_print):\n\n print(\"=========== REPORT ============\")\n for stock in stocks_to_print:\n stock.print_one_line_report()", "def report(s):\n if opts[\"verbose\"]:\n print(\"%s: %s\" % (NAME, s))", "def printSearchResults(results):\n Log.Debug('Search produced %d results:' % len(results))\n index = 0\n for result in results:\n Log.Debug(' ... %d: id=\"%s\", name=\"%s\", year=\"%s\", score=\"%d\".' %\n (index, result.id, result.name, str(result.year), result.score))\n index += 1" ]
[ "0.7021992", "0.6844773", "0.6789687", "0.66836685", "0.66819364", "0.65366095", "0.65018225", "0.648304", "0.64820206", "0.64493674", "0.64239174", "0.64180636", "0.6405773", "0.632852", "0.62888855", "0.62459767", "0.6233217", "0.6169831", "0.61656654", "0.6095023", "0.6093647", "0.60828614", "0.6070094", "0.6066862", "0.60517144", "0.6007968", "0.6006362", "0.600469", "0.59762883", "0.5926914" ]
0.7143622
0
Create the perspective in a workbench window. For most cases you should just be able to set the 'contents' trait to lay out views as required. However, you can override this method if you want to have complete control over how the perspective is created.
def create(self, window): # Set the size of the editor area. if self.editor_area_size != (-1, -1): window.editor_area_size = self.editor_area_size # If the perspective has specific contents then add just those. if len(self.contents) > 0: self._add_contents(window, self.contents) # Otherwise, add all of the views defined in the window at their # default positions realtive to the editor area. else: self._add_all(window) # Activate the first view in every region. window.reset_views() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self):\n\n cv2.namedWindow(winname=self.title, flags=self.style)", "def create(self, verbose=False):\r\n # delete the window if its handle exists\r\n if cmds.window(self.window, exists=True):\r\n cmds.deleteUI(self.window)\r\n # initialize the window as a pane for docking\r\n self.window = cmds.loadUI(uiFile=self.uiFile, verbose=verbose)\r\n #layoutWin = cmds.paneLayout(configuration='single')\r\n # create a dockControl and parent the control to layoutWin\r\n cmds.dockControl(allowedArea='all', area='right', floating=False, \r\n height=cmds.window(self.window, query=True, height=True), \r\n content=self.window, label='Docked Cone Pointer Window')\r\n cmds.showWindow(self.window)", "def _make_view(tabbed=False, split=False, scene_width=-1):\n view_options = VGroup(Item('headview', style='custom'), 'view_options',\n show_border=True, show_labels=False, label='View')\n\n scene = VGroup(Item('scene', show_label=False,\n editor=SceneEditor(scene_class=MayaviScene),\n dock='vertical', width=500),\n view_options)\n\n data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),\n label=\"MRI Subject\", show_border=True,\n show_labels=False),\n VGroup(Item('lock_fiducials', style='custom',\n editor=EnumEditor(cols=2,\n values={False: '2:Edit',\n True: '1:Lock'}),\n enabled_when='fid_ok'),\n HGroup('hsp_always_visible',\n Label(\"Always Show Head Shape Points\"),\n show_labels=False),\n Item('fid_panel', style='custom'),\n label=\"MRI Fiducials\", show_border=True,\n show_labels=False),\n VGroup(Item('raw_src', style=\"custom\"),\n HGroup(Item('distance', show_label=True),\n 'omit_points', 'reset_omit_points',\n show_labels=False),\n Item('omitted_info', style='readonly',\n show_label=False),\n label='Head Shape Source (Raw)',\n show_border=True, show_labels=False),\n show_labels=False, label=\"Data Source\")\n\n coreg_panel = VGroup(Item('coreg_panel', style='custom'),\n label=\"Coregistration\", show_border=True,\n show_labels=False,\n enabled_when=\"fid_panel.locked\")\n\n if split:\n main_layout = 'split'\n else:\n main_layout = 'normal'\n\n if tabbed:\n main = HGroup(scene,\n Group(data_panel, coreg_panel, show_labels=False,\n layout='tabbed'),\n layout=main_layout)\n else:\n main = HGroup(data_panel, scene, coreg_panel, show_labels=False,\n layout=main_layout)\n\n view = View(main, resizable=True, handler=CoregFrameHandler(),\n buttons=NoButtons)\n return view", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def preview(self):\n toplevel = self._get_toplevel()\n if toplevel is not None:\n toplevel.preview(refresh=True)", "def create_screen(self, width, height):", "def SavePerspective(self):\r\n \r\n # Build list of panes/tabs\r\n tabs = \"\"\r\n all_panes = self._mgr.GetAllPanes()\r\n \r\n for pane in all_panes:\r\n\r\n if pane.name == \"dummy\":\r\n continue\r\n\r\n tabframe = pane.window\r\n \r\n if tabs:\r\n tabs += \"|\"\r\n \r\n tabs += pane.name + \"=\"\r\n \r\n # add tab id's\r\n page_count = tabframe._tabs.GetPageCount()\r\n \r\n for p in xrange(page_count):\r\n \r\n page = tabframe._tabs.GetPage(p)\r\n page_idx = self._tabs.GetIdxFromWindow(page.window)\r\n \r\n if p:\r\n tabs += \",\"\r\n\r\n if p == tabframe._tabs.GetActivePage():\r\n tabs += \"+\"\r\n elif page_idx == self._curpage:\r\n tabs += \"*\"\r\n \r\n tabs += \"%u\"%page_idx\r\n \r\n tabs += 
\"@\"\r\n\r\n # Add frame perspective\r\n tabs += self._mgr.SavePerspective()\r\n\r\n return tabs", "def pyvista_render_window():\n from pyvista import examples\n globe = examples.load_globe() #add texture\n pl = pv.Plotter()\n pl.add_mesh(globe)\n sphere = pv.Sphere()\n scalars=sphere.points[:, 2]\n sphere._add_point_array(scalars, 'test', set_active=True) #allow to test scalars\n pl.add_mesh(sphere)\n return pl.ren_win", "def start(self):\n self.delegate.start_preview(fullscreen=False, window = (350, 10, self.size[0] - 350, self.size[1] - 10))", "def _add_perspective_item(self, window, item):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(item.relative_to) > 0:\r\n relative_to = window.get_view_by_id(item.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # fixme: This seems a bit ugly, having to reach back up to the\r\n # window to get the view. Maybe its not that bad?\r\n view = window.get_view_by_id(item.id)\r\n if view is not None:\r\n # fixme: This is probably not the ideal way to sync view traits\r\n # and perspective_item traits.\r\n view.style_hint = item.style_hint\r\n # Add the view to the window.\r\n window.add_view(\r\n view, item.position, relative_to, (item.width, item.height)\r\n )\r\n\r\n else:\r\n # The reason that we don't just barf here is that a perspective\r\n # might use views from multiple plugins, and we probably want to\r\n # continue even if one or two of them aren't present.\r\n #\r\n # fixme: This is worth keeping an eye on though. If we end up with\r\n # a strict mode that throws exceptions early and often for\r\n # developers, then this might be a good place to throw one ;^)\r\n logger.error('missing view for perspective item <%s>' % item.id)\r\n\r\n return", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def createUI(self):\n\n q.getQItem(windowID, QtWidgets.QWidget)\n cmds.setParent(q.fullPath)\n\n # ################################################\n # Active Render Layer\n\n # cmds.separator(height=12, style='none')\n addFrameLayout(\n '%s_frameLayoutLayers' % windowID,\n 'Visible Render Layer', collapsable=False,\n labelVisible=False,\n marginHeight=0\n )\n\n addRowLayout(\n '%s_rowLayoutActiveRenderLayer' % windowID,\n 4,\n columnAlign4=('left', 'left', 'right', 'right'),\n columnAttach4=('left', 'both', 'right', 'right'),\n columnWidth4=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.775,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075\n )\n )\n\n\n addButton('%s_addNewLayer' % windowID, 'New', rsAddNewLayer,\n image='RS_create_layer', size=(21, 21))\n addOptionMenu('%s_selectActiveLayer' % windowID,\n 'Active Layer ', (), rsSelectActiveLayer)\n addButton('rsOpenRenderSetupWindow', 'Render Setup',\n rsOpenRenderSetupWindow, image='render_setup.png',\n size=(21, 21))\n addButton('rsOpenUnifiedRenderGlobals', 'Render Globals',\n rsOpenUnifiedRenderGlobals, image='render_setup.png',\n size=(21, 21))\n\n # ################################################\n # Work Render Layers\n\n cmds.setParent(q.fullPath)\n addFrameLayout('%s_frameLayoutLayersB' % windowID,\n 'Work Render Layer', collapsable=False,\n labelVisible=False, marginHeight=0)\n addRowLayout('%s_rowLayoutVisibleRenderLayer' % windowID, 3,\n columnAlign3=('left', 'left', 'right'),\n columnAttach3=('left', 'both', 'right'),\n 
columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.075, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.85,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075))\n\n cmds.separator()\n addOptionMenu('%s_selectVisibleLayer' % windowID,\n 'Visible Layer ', (), rsSelectVisibleLayer)\n cmds.separator()\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=12, style='none')\n\n # ################################################\n # Collections\n\n addFrameLayout('%s_frameLayout02' % windowID, 'Collections',\n labelVisible=False, marginHeight=0)\n\n addRowLayout(\n '%s_rowLayout02' % windowID,\n 6,\n columnAlign6=('left', 'left', 'left', 'left', 'left', 'left'),\n columnAttach6=('both', 'both', 'right', 'right', 'right', 'right'),\n columnWidth6=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.415,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n )\n )\n\n addButton('rsAddCollection', 'Add', rsAddCollection)\n addButton('rsRemoveCollection', 'Remove', rsRemoveCollection)\n addButton('rsSelectShapes', 'Select Shapes', rsSelectShapes,\n image='selectObject.png', size=(21, 21))\n addButton('rsRenameShader', 'Rename Shader', rsRenameShader,\n size=(21, 21), image='QR_rename.png')\n addButton('rsDuplicateShader', 'Duplicate Shader',\n duplicateShader, size=(21, 21), image='newPreset.png')\n addButton('rsRefreshUI', 'Refresh', rsRefreshUI, size=(21, 21),\n image='QR_refresh.png')\n\n # ###########################\n # Filter List\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout03' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.6, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.42))\n\n addTextField('%s_filterShaderList' % windowID, 'Search',\n rsFilterShaderList_off, rsFilterShaderList_off,\n window.updateUI)\n addOptionMenu('rsShaderGroups', '|', (), rsShaderGroups)\n\n # ###########################\n # The shaders scroll list\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout04' % windowID, 1, columnAlign1='both', columnAttach1='both', columnWidth1=WINDOW_WIDTH\n + 12)\n addTextScrollList('%s_ShaderScrollList' % windowID, (),\n rsShaderScrollList_doubleClick,\n rsShaderScrollList_onSelect,\n rsShaderScrollList_deleteKey)\n\n # Add popup menu:\n\n cmds.popupMenu('rsShaderScrollListPopupMenu',\n parent='%s_ShaderScrollList' % windowID,\n allowOptionBoxes=False, markingMenu=True,\n postMenuCommand=postMenuCommand)\n cmds.menuItem('%s_popupMenuItem02' % windowID,\n label='Duplicate Shader', command=duplicateShader)\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem04' % windowID,\n label='Graph Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem03' % windowID,\n label='Select Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem05' % windowID,\n label='Select Assigned Shapes')\n cmds.menuItem('%s_popupMenuItem06' % windowID,\n label='Select Assigned Transforms')\n\n # ##################################################\n # Arnold Property Overrides\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout20' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n 
style='none')\n\n addRowLayout('%s_rowLayout05' % windowID, 2,\n columnAlign2=('left', 'both'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_textArnoldPropertyOverridesLabel' % windowID,\n 'Apply Arnold Property Overrides', 'plainLabelFont')\n addCheckBox('rsArnoldPropertyOverridesCheckBox', '',\n rsArnoldPropertyOverridesCheckBox,\n rsArnoldPropertyOverridesCheckBox)\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n # Column Layout to toggle\n\n cmds.setParent('%s_columnLayout20' % windowID)\n cmds.columnLayout(\n '%s_columnLayout02' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n addCheckboxes('%s_columnLayout02' % windowID)\n cmds.columnLayout('%s_columnLayout02' % windowID, edit=True,\n visible=False)\n\n # #################################################\n # Shader Override\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout21' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n addRowLayout('%s_rowLayout06' % windowID, 2,\n columnAlign2=('left', 'right'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_shaderOverrideLabel' % windowID, 'Shader Override',\n 'plainLabelFont')\n addCheckBox('%s_shaderOverrideCheckbox' % windowID, '',\n rsShaderOverrideCheckbox, rsShaderOverrideCheckbox)\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n\n cmds.setParent('%s_columnLayout21' % windowID)\n cmds.columnLayout(\n '%s_columnLayout03' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('both', 4),\n adjustableColumn=True,\n rowSpacing=0,\n )\n cmds.setParent('%s_columnLayout03' % windowID)\n addOptionMenu('%s_optionMenu02' % windowID, 'Select: ', (),\n rsShaderOverridesMenu)\n\n global selectedShaderOverride\n\n # default selection\n\n selectedShaderOverride = SHADER_OVERRIDE_OPTIONS[0]['ui']\n cmds.columnLayout('%s_columnLayout03' % windowID, edit=True,\n visible=False)\n\n # #################################################\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=10, style='none')\n\n # #################################################\n # Extras\n\n addFrameLayout('%s_frameLayout50' % windowID, 'Extras',\n collapsable=True, marginHeight=0,\n labelVisible=False)\n\n # #################################################\n # Add & Assign Shader Groups\n\n addFrameLayout(\n '%s_frameLayout05' % windowID,\n 'Add & Assign Shader Groups',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=False,\n labelVisible=True,\n )\n\n # Add the renamer window\n\n self.gwCustomRenamer = CustomRenamer()\n self.gwCustomRenamer.createUI()\n\n # #################################################\n # AutoConnect\n\n cmds.setParent('%s_frameLayout50' % windowID)\n\n addFrameLayout(\n '%s_frameLayout03' % windowID,\n 'Adobe Connector',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout07', 3, columnAlign3=('left', 'left',\n 'left'), columnAttach3=('both', 'both', 'both'),\n 
columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addButton('updateConnections', '> Update Connections <',\n updateConnections)\n addButton('uvSnapshot', 'UV Snapshot', uvSnapshot)\n addButton('editTexture', 'Edit Texture', editTexture)\n\n # After Effects\n\n cmds.setParent('%s_frameLayout03' % windowID)\n addRowLayout('%s_rowLayout11' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.6))\n addText('%s_text90' % windowID, 'Send to After Effects:')\n addButton('makeCompButton', 'Send to After Effects', rsMakeComp)\n\n # #################################################\n # Render Setup /\n # Output settings\n\n cmds.setParent('%s_frameLayout50' % windowID)\n addFrameLayout(\n '%s_frameLayout04' % windowID,\n 'Output Settings',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout08' % windowID, 1,\n columnAlign1='center', columnAttach1='both',\n columnWidth1=WINDOW_WIDTH - FRAME_MARGIN * 2)\n addButton('%s_revealOutputDirectory' % windowID,\n 'Output path not set yet', rsRevealOutputDirectory)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout09' % windowID, 3,\n columnAlign3=('left', 'right', 'right'),\n columnAttach3=('left', 'right', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.8, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.14,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.06))\n\n addOptionMenu('%s_optionMenu05' % windowID, '', (),\n rsSelectOutputTemplate)\n addOptionMenu('%s_outputVersionMenu' % windowID, '', (),\n rsSelectOutputVersion)\n cmds.menuItem(label='v001')\n\n cmds.setParent('%s_rowLayout09' % windowID)\n addButton('%s_incrementOutputVersionButton' % windowID, '+1',\n rsIncrementOutputVersion, size=(21, 21))\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout10' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.7, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addOptionMenu('%s_optionMenu03' % windowID, 'Format:', (),\n rsOutputTemplatesMenu)\n addOptionMenu('%s_optionMenu06' % windowID, '', (),\n rsSetFPSMenu)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout12' % windowID, 4,\n columnAlign4=('right', 'left', 'right', 'left'),\n columnAttach4=('both', 'both', 'both', 'both'),\n columnWidth4=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.50, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.20,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15))\n\n addText('%s_setInFrameLabel' % windowID, 'In Frame ')\n addTextField('%s_setInFrame' % windowID, '', setInFrame,\n setInFrame, setInFrame)\n\n addText('%s_setOutFrameLabel' % windowID, 'Out Frame ')\n addTextField('%s_setOutFrame' % windowID, '', setOutFrame,\n setOutFrame, setOutFrame)", "def SetupView(self):\r\n size = self.GetClientSizeTuple()\r\n height = self.maxtop - self.maxbottom\r\n width = self.maxright - self.maxleft\r\n \r\n #The ratio of the width to the height in the client-area\r\n screenratio = float(size[0]) / float(size[1])\r\n \r\n #The ratio of the world window. 
Because of divide-by-0, we have to make a special-case assignment\r\n if height == 0 or width == 0:\r\n ratio = screenratio\r\n else:\r\n ratio = width / height\r\n\r\n #Should seem familiar, since we did it in class...\r\n if ratio > screenratio:\r\n glViewport(0, (size[1] - (size[0] / ratio)) / 2, size[0], size[0] / ratio)\r\n if ratio < screenratio:\r\n glViewport((size[0] - size[1] * ratio) / 2, 0, size[1] * ratio, size[1])\r\n \r\n \r\n #I need to find an appropriate border value. It's scaled by the client-area because the world-window zooms, thus skewing any normal border given.\r\n if width == 0 or height == 0:\r\n xborder = 1\r\n yborder = 1\r\n else:\r\n xscale = size[0] / width\r\n xborder = 10 / xscale\r\n yscale = size[1] / height\r\n yborder = 10 / yscale\r\n \r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n gluOrtho2D(self.maxleft - xborder, self.maxright + xborder, self.maxbottom - yborder, self.maxtop + yborder)", "def create_main_enviroment(self):\n # self.layout=QGridLayout()\n self.resize(900, 900)\n self.centralWidget = CentralWidget(self) # CentralWidget(self)\n self.setCentralWidget(self.centralWidget)\n\n # self.toolbar = QToolBar(self)\n # self.addToolBar(self.toolbar)\n\n # self.setLayout(self.layout)\n self.setWindowTitle(\"Fitting elastic constants\")", "def show_to_window(self):\n if self.normal_mode:\n self.show_image.show_original_image(\n self.image, self.width_original_image)\n self.show_image.show_result_image(\n self.image, self.width_result_image, self.angle)\n\n else:\n if self.panorama_mode:\n image = draw_polygon(\n self.image.copy(),\n self.mapX_pano,\n self.mapY_pano)\n mapX = np.load(\n './plugins/Thread_inspection/view_image/maps_pano/mapX.npy')\n mapY = np.load(\n './plugins/Thread_inspection/view_image/maps_pano/mapY.npy')\n rho = self.panorama.rho\n\n self.result_image = cv2.remap(\n self.image,\n mapX,\n mapY,\n cv2.INTER_CUBIC)\n self.result_image = self.result_image[round(\n rho + round(self.moildev.getRhoFromAlpha(30))):self.h, 0:self.w]\n # print(self.width_result_image)\n else:\n image = draw_polygon(self.image.copy(), self.mapX, self.mapY)\n self.result_image = cv2.remap(\n self.image,\n self.mapX,\n self.mapY,\n cv2.INTER_CUBIC)\n self.show_image.show_original_image(\n image, self.width_original_image)\n self.show_image.show_result_image(\n self.result_image, self.width_result_image, self.angle)", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def createAboutWindow(self):\n if (not hasattr(self, \"about_window\")):\n self.about_window = AboutWindow(self)\n self.about_window.show()", "def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)", "def createWindow(self):\n\n # create window, set basic attributes\n w = gtk.Window(gtk.WINDOW_TOPLEVEL)\n w.set_size_request(*self.__def_win_size__)\n 
w.set_decorated(False)\n #w.fullscreen()\n #w.unfullscreen()\n w.set_title(self.__name__)\n w.connect(\"destroy\", gtk.main_quit)\n\n # declare buttons and their associated handlers\n controls = (\n (\"open_button\", gtk.ToolButton(gtk.STOCK_OPEN), self.onPlay),\n (\"play_button\", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),\n (\"stop_button\", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),\n (\"quit_button\", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)\n )\n\n # as well as the container in which to put them\n box = gtk.HButtonBox()\n\n # for every widget, connect to its clicked signal and add it\n # to the enclosing box\n for name, widget, handler in controls:\n widget.connect(\"clicked\", handler)\n box.pack_start(widget, True)\n setattr(self, name, widget)\n\n viewer = gtk.DrawingArea()\n viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)\n\n # we will need this later\n self.xid = None\n\n # now finally do the top-level layout for the window\n layout = gtk.VBox(False)\n layout.pack_start(viewer)\n\n # subclasses can override childWidgets() to supply\n # custom controls\n layout.pack_start(self.customWidgets(), False, False)\n layout.pack_end(box, False, False)\n w.add(layout)\n w.show_all()\n\n # we want to return only the portion of the window which will\n # be used to display the video, not the whole top-level\n # window. a DrawingArea widget is, in fact, an X11 window.\n return viewer", "def create_frame_viewer(self):\n if not hasattr(self, \"frame_viewer1\"):\n self.frame_viewer1 = dialog.FrameViewer()\n self.frame_viewer1.setWindowTitle(\"Frame viewer #1\")\n self.frame_viewer1.setWindowFlags(Qt.WindowStaysOnTopHint)\n if self.frame_viewer1_mem_geometry:\n self.frame_viewer1.setGeometry(self.frame_viewer1_mem_geometry)\n else:\n self.frame_viewer1.setGeometry(100, 100, 1024, 768)\n\n if self.second_player():\n if not hasattr(self, \"frame_viewer2\"):\n self.frame_viewer2 = dialog.FrameViewer()\n self.frame_viewer2.setWindowTitle(\"Frame viewer #2\")\n self.frame_viewer2.setWindowFlags(Qt.WindowStaysOnTopHint)\n if self.frame_viewer2_mem_geometry:\n self.frame_viewer2.setGeometry(self.frame_viewer2_mem_geometry)\n else:\n self.frame_viewer2.setGeometry(150, 150, 1024, 768)", "def studio_preview_view(self, context):\r\n fragment = Fragment()\r\n self.render_reorderable_children(context, fragment)\r\n return fragment", "def studio_preview_view(self, context):\r\n fragment = Fragment()\r\n contents = []\r\n\r\n for child in self.descriptor.get_children():\r\n rendered_child = self.runtime.get_module(child).render('student_view', context)\r\n fragment.add_frag_resources(rendered_child)\r\n\r\n contents.append({\r\n 'id': child.location.to_deprecated_string(),\r\n 'content': rendered_child.content\r\n })\r\n\r\n fragment.add_content(self.system.render_template('vert_module.html', {\r\n 'items': contents\r\n }))\r\n\r\n return fragment", "def __init__(self,currentExperiment):\n super(AmoebaCreateExperimentWindow,self).__init__()\n\n self.currentExperiment = currentExperiment\n\n #Create the window\n self.subWindow = QMdiSubWindow()\n\n self.widget = AmoebaCreateExperiment(self.subWindow,self.currentExperiment)\n \n #Create the UI.\n self.setWindowTitle(\"Create a new experiment.\")\n\n self.scroll = QScrollArea()\n\n self.scroll.setMinimumWidth(270)\n self.scroll.setWidget(self.widget)\n self.scroll.setWidgetResizable(True)\n\n #Connect button to next function.\n self.subWindow.setWidget(self.scroll)", "def _create_window(self):\n self.window = Gtk.Window()\n 
self.window.set_title(\"Yapsy Example\")\n self.window.set_default_size(400, 400)\n self.window.connect(\"destroy\", lambda w: Gtk.main_quit())\n # PluginList() is a composite widget that shows all installed plugins\n # in a Gtk.TreeView. See widgets.py\n self._plugin_list = PluginList(self.window)\n box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n box.pack_start(self._plugin_list, True, True, 0)\n box.show_all()\n self.window.add(box)", "def perspective(self, perspective):\n\n self.container['perspective'] = perspective", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def _add_contents(self, window, contents):\r\n\r\n # If we are adding specific contents then we ignore any default view\r\n # visibility.\r\n #\r\n # fixme: This is a bit ugly! Why don't we pass the visibility in to\r\n # 'window.add_view'?\r\n for view in window.views:\r\n view.visible = False\r\n \r\n for item in contents:\r\n self._add_perspective_item(window, item)\r\n \r\n return", "def show(self):\n if AMOEBA_CREATE_EXPERIMENT_DEBUG:\n print \"Show Window.\"\n self.subWindow.show()" ]
[ "0.6131212", "0.5922545", "0.58861494", "0.5807673", "0.57519644", "0.5720677", "0.56706893", "0.56655294", "0.56585646", "0.5613341", "0.5611122", "0.5565591", "0.5520122", "0.55085135", "0.5473944", "0.54577786", "0.54514945", "0.54325914", "0.54226893", "0.54205424", "0.5417984", "0.54119235", "0.5404403", "0.5394007", "0.53751236", "0.5334031", "0.5330257", "0.5322983", "0.53160393", "0.53150403" ]
0.7491928
0
Called when the perspective is shown in a workbench window. The default implementation does nothing, but you can override this method if you want to do something whenever the perspective is activated.
def show(self, window): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rendererWindowActivated(self, sw):\n pass", "def show(self):\n if AMOEBA_CREATE_EXPERIMENT_DEBUG:\n print \"Show Window.\"\n self.subWindow.show()", "def show_window(self):\n self.show()", "def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()", "def ev_windowshown(self, event: WindowEvent) -> None:", "def start(self):\n self.delegate.start_preview(fullscreen=False, window = (350, 10, self.size[0] - 350, self.size[1] - 10))", "def show(self,window):\n self.showFunctions(window)", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def show(self):\r\n self.wf.Show()", "def on_show_view(self):\n self.window.background_color = arcade.color.WHITE", "def show(self):\r\n wlight.lightController.redraw()", "def on_show_view(self):\n self.window.background_color = arcade.color.BLACK", "def show(self):\n self._window.show()", "def show(self):\n # This function has to be placed here (and not in the user.py script)\n self.showMaximized()\n visapp.run()", "def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')", "def onWorkbench():\n workbench = Gui.activeWorkbench().__class__.__name__\n\n if layout:\n while not layout.isEmpty():\n item = layout.takeAt(0)\n del item\n\n buttons = cpcmd.workbenchButtons(workbench)\n\n if p.GetString(\"Layout\") == \"Grid\":\n columns = p.GetInt(\"ColumnNumber\", 1) - 1\n x = 0\n y = 0\n for btn in buttons:\n if y > columns:\n y = 0\n x += 1\n layout.addWidget(btn, x, y)\n y += 1\n else:\n for btn in buttons:\n layout.addWidget(btn)", "def show(self):\n self.wid.show()", "def _handler_default_view(self, event):\n self._mgr.LoadPerspective(\n self._perspectives['default'])", "def present(self):\n if self.isWindow :\n self.present(self)\n else :\n assert hasattr(self, 'window'), \\\n \"ManagedWindow: self.window does not exist!\"\n self.window.present()", "def on_window_ready(self):\n pass", "def show(self):\n self.window.run_command(\"show_panel\", {\"panel\": self.full_name})", "def do_activate(self):\n\n Gtk.Application.do_activate(self)\n self.initiate_plugins()\n self.other[\"menu_button\"].set_menu_model(self.prepare_menu())\n self.output_window.show_all()\n self.window.show_all()", "def showWindow(*args, **kwargs)->None:\n pass", "def on_action_9_triggered(self):\n # TODO: not implemented yet\n print('全屏')\n self.showFullScreen()", "def show(self):\r\n display(self.grid_part)", "def show(self):\n self.scene().show()", "def preview(self):\n toplevel = self._get_toplevel()\n if toplevel is not None:\n toplevel.preview(refresh=True)" ]
[ "0.636253", "0.6302572", "0.61699057", "0.61239356", "0.6110351", "0.6086942", "0.60822815", "0.59712666", "0.59361935", "0.59361935", "0.59361935", "0.59168285", "0.59083813", "0.5892436", "0.585637", "0.58189636", "0.57990617", "0.5798876", "0.5788901", "0.57813394", "0.5768827", "0.57307076", "0.5726389", "0.5694405", "0.56858987", "0.56683695", "0.5653989", "0.5650604", "0.56465375", "0.563295" ]
0.6407262
0
Adds a perspective item to a window.
def _add_perspective_item(self, window, item): # If no 'relative_to' is specified then the view is positioned # relative to the editor area. if len(item.relative_to) > 0: relative_to = window.get_view_by_id(item.relative_to) else: relative_to = None # fixme: This seems a bit ugly, having to reach back up to the # window to get the view. Maybe its not that bad? view = window.get_view_by_id(item.id) if view is not None: # fixme: This is probably not the ideal way to sync view traits # and perspective_item traits. view.style_hint = item.style_hint # Add the view to the window. window.add_view( view, item.position, relative_to, (item.width, item.height) ) else: # The reason that we don't just barf here is that a perspective # might use views from multiple plugins, and we probably want to # continue even if one or two of them aren't present. # # fixme: This is worth keeping an eye on though. If we end up with # a strict mode that throws exceptions early and often for # developers, then this might be a good place to throw one ;^) logger.error('missing view for perspective item <%s>' % item.id) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perspective(self, perspective):\n\n self.container['perspective'] = perspective", "def create(self, window):\r\n\r\n # Set the size of the editor area.\r\n if self.editor_area_size != (-1, -1):\r\n window.editor_area_size = self.editor_area_size\r\n\r\n # If the perspective has specific contents then add just those.\r\n if len(self.contents) > 0:\r\n self._add_contents(window, self.contents)\r\n\r\n # Otherwise, add all of the views defined in the window at their\r\n # default positions realtive to the editor area.\r\n else:\r\n self._add_all(window)\r\n\r\n # Activate the first view in every region.\r\n window.reset_views()\r\n \r\n return", "def _add_contents(self, window, contents):\r\n\r\n # If we are adding specific contents then we ignore any default view\r\n # visibility.\r\n #\r\n # fixme: This is a bit ugly! Why don't we pass the visibility in to\r\n # 'window.add_view'?\r\n for view in window.views:\r\n view.visible = False\r\n \r\n for item in contents:\r\n self._add_perspective_item(window, item)\r\n \r\n return", "def popup_add(self, event):\n def callb():\n PhysicsWindow.AddObjectWindow(self.window, event)\n return callb", "def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return", "def OnConnect(self, perspective):\r\n\t\tself.perspective = perspective\r\n\t\tself.onConnect()", "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)", "def add(self):\n\n self.scene.projs.add(self)\n self.scene.all.add(self.scene.projs)\n self.rotate()", "def SetItemWindow(self, item, window, column=None):\r\n\r\n # Reparent the window to ourselves\r\n if window.GetParent() != self:\r\n window.Reparent(self)\r\n \r\n item.SetWindow(window, column)\r\n if window:\r\n self._hasWindows = True", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)", "def add_window(self, window):\n if not self.valid_window(window):\n return False\n self.windows.append(window)\n window.tiler = self\n if window not in self.start_positions.keys():\n self.start_positions[window] = window.display_size\n\n rules = config.GET_RULES(window.classname)\n if rules is not None and re.search(rules[\"regex\"], window.title) is not None:\n if \"floating\" in rules:\n window.set_floating(rules[\"floating\"])\n if \"decorated\" in rules:\n if bool(rules[\"decorated\"]):\n window.enable_decoration()\n else:\n window.disable_decoration()\n if \"position\" in rules:\n window.move_to(tuple(rules[\"position\"]))\n\n print(\"Added window: {0}\".format(window))\n window.print_window_styles()\n return True", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + 
win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def SetItemWindow(self, item, wnd):\r\n\r\n if wnd is not None:\r\n self._hasWindows = True\r\n if item not in self._itemWithWindow:\r\n self._itemWithWindow.append(item)\r\n else:\r\n self.DeleteItemWindow(item)\r\n else:\r\n self.DeleteItemWindow(item)\r\n \r\n item.SetWindow(wnd)\r\n self.CalculatePositions()\r\n self.Refresh()\r\n self.AdjustMyScrollbars()", "def rsAddNewLayer(item):\n\n WIDTH = WINDOW_WIDTH * (float(4) / 5)\n OFFSET = WINDOW_WIDTH * (float(1) / 5)\n HEIGHT = 75\n if cmds.window(windowNewLayerID, exists=True):\n cmds.deleteUI(windowNewLayerID)\n\n cmds.window(\n windowNewLayerID,\n sizeable=False,\n title=windowNewLayerTitle,\n iconName=windowNewLayerTitle,\n width=WIDTH,\n height=HEIGHT,\n )\n\n def rsuNewLayerWindow_button01(arg):\n text = cmds.textField('rsuNewLayerWindow_textField01',\n query=True, text=True)\n if len(text) > 0:\n rsUtility.layer(text)\n cmds.deleteUI(windowNewLayerID, window=True)\n window.updateUI(updateRenderSetup=True)\n\n def rsuNewLayerWindow_textField01(arg):\n if len(arg) == 0:\n cmds.button('rsuNewLayerWindow_button01', edit=True,\n enable=False)\n else:\n cmds.button('rsuNewLayerWindow_button01', edit=True,\n enable=True)\n\n cmds.columnLayout(\n 'rsuNewLayerWindow_columnLayout01',\n parent=windowNewLayerID,\n columnAlign='center',\n columnAttach=('both', 10),\n columnWidth=WIDTH,\n rowSpacing=1,\n )\n\n addSeparator('rsuNewLayerWindow_sep01', height=FRAME_MARGIN)\n addText('rsuNewLayerWindow_enterText', 'New layer name:',\n font='boldLabelFont')\n addSeparator('rsuNewLayerWindow_sep02', height=FRAME_MARGIN)\n addTextField('rsuNewLayerWindow_textField01', '',\n rsuNewLayerWindow_textField01,\n rsuNewLayerWindow_textField01,\n rsuNewLayerWindow_textField01)\n cmds.columnLayout('rsuNewLayerWindow_columnLayout02',\n columnAlign='center', columnAttach=('both', 0),\n columnWidth=WIDTH - FRAME_MARGIN * 2)\n addButton('rsuNewLayerWindow_button01', 'Create',\n command=rsuNewLayerWindow_button01, enable=False)\n addSeparator('rsuNewLayerWindow_sep03', height=FRAME_MARGIN)\n cmds.showWindow(cmds.window(windowNewLayerID, q=True))\n\n # Match window position to parent\n\n q.getQItem(windowNewLayerID, QtWidgets.QWidget)\n globalPos = window.mapToGlobal(window.pos())\n x = globalPos.x() + 28\n y = globalPos.y()\n q.widget.move(x, y)", "def add_item_popup(self):\n popup = GetItemURLDialogue(self, \"Add Item\", \"\", \"\")\n if not popup.cancelled:\n self.add_item(popup.name, popup.url)", "def add_material_page(wiz, title, params):\n add_grid_page(wiz, u\"Material properties\", title, params)", "def addObject(self):\n\t\tsel = mc.ls( sl = True, typ = 'transform' )\n\t\tif sel:\n\t\t\tself.objects_lw.addItems( sel )", "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def click_add():\n # TODO: 1. In frontend_script.py, create function \"create_window()\" that takes a Toplevel() as a parameter.\n # TODO: 2. 
In this file, implement the code below\n # new_window = Toplevel(root)\n # frontend_script.create_window(new_window)", "def _addToKnowledge(pop):\n perspective = pop.getPerspectiveName()\n if perspective not in _byPerspective:\n _byPerspective[perspective] = []\n _byPerspective[perspective].append(pop)\n nodeOid = pop.getNodeOid()\n if nodeOid not in _byNodeOid:\n _byNodeOid[nodeOid] = sets.Set()\n _byNodeOid[nodeOid].add(perspective)\n apoOid = pop.getApoOid()\n if apoOid not in _byApoOid:\n _byApoOid[apoOid] = {}\n _byApoOid[apoOid][perspective] = pop", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def AppendWindow(self, window, sashPos=-1):\n self.InsertWindow(len(self._windows), window, sashPos)", "def add(self, args):\n\n if not args:\n self.err_print('One argument required')\n return -1\n\n if len(args) == 1:\n pl = self.ui.leftwin.highlighted().data\n else:\n ind = self.pl_exists(args[1])\n if ind < 0:\n return -1\n\n pl = self.ui.leftwin.data[ind].data\n\n newitem = args[0]\n\n #TODO:\n #consider a bunch of different return values for\n #sqlr to differentiate\n if os.path.isfile(newitem):\n pl.insert(newitem)\n elif os.path.isdir(newitem):\n pl.insert_dir(newitem)\n\n self.ui.rightwin.disp()\n\n return 0", "def add_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n window.setParent(self)\n self.addSubWindow(window)\n window.show()\n window.restore_window()\n self._logger.debug(\"done\")", "def stackingWindows():\n space = 50\n offset = 70\n cv2.moveWindow(\"Original image\", space, space)\n cv2.moveWindow(\"Keypoints original\", space, hsize + space + offset)\n cv2.moveWindow(\"Color matched\", wsize + space, space)\n cv2.moveWindow(\"Keypoints Dark\", wsize + space, hsize + space + offset)", "def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()", "def _addView(self, win, fn=None, noName=\"\", addNext=False, indexes=None):\n raise RuntimeError('Not implemented')" ]
[ "0.65281814", "0.59958583", "0.5550467", "0.54392403", "0.5326437", "0.5264936", "0.52013224", "0.5158011", "0.51520026", "0.5143954", "0.51338893", "0.5127427", "0.50639695", "0.50639695", "0.50639695", "0.5023666", "0.49614492", "0.49010316", "0.48866042", "0.48582387", "0.48047468", "0.47521916", "0.47159773", "0.46373957", "0.46104077", "0.46076044", "0.46043447", "0.45973745", "0.45956144", "0.45909134" ]
0.81795806
0
Adds all of the views defined in the window.

def _add_all(self, window): for view in window.views: if view.visible: self._add_view(window, view) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_views(self, *args):\n for view in args:\n self.add_view(view)", "def _add_contents(self, window, contents):\r\n\r\n # If we are adding specific contents then we ignore any default view\r\n # visibility.\r\n #\r\n # fixme: This is a bit ugly! Why don't we pass the visibility in to\r\n # 'window.add_view'?\r\n for view in window.views:\r\n view.visible = False\r\n \r\n for item in contents:\r\n self._add_perspective_item(window, item)\r\n \r\n return", "def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def create(self, window):\r\n\r\n # Set the size of the editor area.\r\n if self.editor_area_size != (-1, -1):\r\n window.editor_area_size = self.editor_area_size\r\n\r\n # If the perspective has specific contents then add just those.\r\n if len(self.contents) > 0:\r\n self._add_contents(window, self.contents)\r\n\r\n # Otherwise, add all of the views defined in the window at their\r\n # default positions realtive to the editor area.\r\n else:\r\n self._add_all(window)\r\n\r\n # Activate the first view in every region.\r\n window.reset_views()\r\n \r\n return", "def refresh_windows(self):\n for window_class, windows in self.extra_windows.items():\n obj = 'lord' if window_class is Nobleman else 'location'\n func = eval(f'self.manager.get_{obj}_of_id')\n for id, window in windows.items():\n instance = func(id)\n self.destroy_children_widgets(window)\n self.generate_window_content(instance, window)\n self.update_widgets_values()", "def _addPanes(self):\n\n self._addPaneMapWindow()\n self._addPaneToolbar(name = 'digitMap')", "def views(self, views):\n\n self._views = views", "def add_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n window.setParent(self)\n self.addSubWindow(window)\n window.show()\n window.restore_window()\n self._logger.debug(\"done\")", "def viewAll(self):\n self._sceneviewer.viewAll()", "def update_views():\n # replace Supervisor main entry\n here = path.abspath(path.dirname(__file__))\n # set main page\n VIEWS['index.html'] = {'template': path.join(here, 'ui/index.html'), 'view': SupvisorsView}\n # set address /processpage\n VIEWS['procaddress.html'] = {'template': path.join(here, 'ui/procaddress.html'), 'view': ProcAddressView}\n # set address/host page\n VIEWS['hostaddress.html'] = {'template': path.join(here, 'ui/hostaddress.html'), 'view': HostAddressView}\n # set application page\n VIEWS['application.html'] = {'template': path.join(here, 'ui/application.html'), 'view': ApplicationView}\n # set fake page to export images\n 
VIEWS['process_cpu.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': ProcessCpuImageView}\n VIEWS['process_mem.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': ProcessMemoryImageView}\n VIEWS['address_cpu.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressCpuImageView}\n VIEWS['address_mem.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressMemoryImageView}\n VIEWS['address_io.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressNetworkImageView}", "def update_views():\n # replace Supervisor main entry\n here = os.path.abspath(os.path.dirname(__file__))\n # set main page\n VIEWS['index.html'] = {'template': os.path.join(here, 'ui/index.html'),\n 'view': SupvisorsView}\n # set address /processpage\n VIEWS['procaddress.html'] = {'template': os.path.join(\n here, 'ui/procaddress.html'),\n 'view': ProcAddressView}\n # set address/host page\n VIEWS['hostaddress.html'] = {'template': os.path.join(\n here, 'ui/hostaddress.html'),\n 'view': HostAddressView}\n # set application page\n VIEWS['application.html'] = {'template': os.path.join(\n here, 'ui/application.html'),\n 'view': ApplicationView}\n # set fake page to export images\n VIEWS['process_cpu.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': ProcessCpuImageView}\n VIEWS['process_mem.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': ProcessMemoryImageView}\n VIEWS['address_cpu.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressCpuImageView}\n VIEWS['address_mem.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressMemoryImageView}\n VIEWS['address_io.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressNetworkImageView}", "def create_views(self):\n # Extract view objects\n customer_views = CustomerViews().views\n admin_views = AdminViews().views\n\n # Add customer views/routes\n for view in customer_views:\n view_obj = customer_views.get(view)\n endpoint = view_obj.endpoint\n view_name = view_obj.name\n self.add_url_rule(endpoint, view_func=view_obj.as_view(view_name))\n \n # Add admin views/routes\n for view in admin_views:\n view_obj = admin_views.get(view)\n endpoint = view_obj.endpoint\n view_name = view_obj.name\n self.add_url_rule(endpoint, view_func=view_obj.as_view(view_name))", "def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)", "def extend_ui(self):\n for name, tab in self.build_general_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"General\", name, scroll)\n self.fill_tab(\"General\", name, tab)\n for name, tab in self.build_display_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"Display\", name, scroll)\n self.fill_tab(\"Display\", name, tab)\n for name, tab in self.build_data_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"Data\", name, scroll)\n self.fill_tab(\"Data\", name, tab)", "def see_all_groups(self):\n self.all_group_window = AllGroups(self)\n 
self.all_group_window.show()\n self.object_created.append(self.all_group_window)", "def _addView(self, win, fn=None, noName=\"\", addNext=False, indexes=None):\n raise RuntimeError('Not implemented')", "def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)", "def createViews(views):\n ...", "def refresh(self):\n\n for w in self.windows.values():\n w.refresh()", "def views(self, location, publish=False):\n self.package.add_views(location)\n # register views into project\n self.application.make(\"view\").add_namespaced_location(\n self.package.name, self.package.views\n )\n\n if publish:\n location_abs_path = self.package._build_path(location)\n for dirpath, _, filenames in os.walk(location_abs_path):\n for f in filenames:\n # don't add other files than templates\n view_abs_path = join(dirpath, f)\n _, ext = os.path.splitext(view_abs_path)\n if ext != \".html\":\n continue\n self.package.add_publishable_resource(\n \"views\",\n view_abs_path,\n views_path(\n join(\n self.vendor_prefix,\n self.package.name,\n relpath(view_abs_path, location_abs_path),\n )\n ),\n )\n\n return self", "def add_window(self, window):\n if not self.valid_window(window):\n return False\n self.windows.append(window)\n window.tiler = self\n if window not in self.start_positions.keys():\n self.start_positions[window] = window.display_size\n\n rules = config.GET_RULES(window.classname)\n if rules is not None and re.search(rules[\"regex\"], window.title) is not None:\n if \"floating\" in rules:\n window.set_floating(rules[\"floating\"])\n if \"decorated\" in rules:\n if bool(rules[\"decorated\"]):\n window.enable_decoration()\n else:\n window.disable_decoration()\n if \"position\" in rules:\n window.move_to(tuple(rules[\"position\"]))\n\n print(\"Added window: {0}\".format(window))\n window.print_window_styles()\n return True", "def windows(self, windows):\n\n self._windows = windows", "def createWidgets(self):\n layout = QHBoxLayout()\n \n self.logsItem = TestsView.TestsView(parent=self, local = self.local)\n \n self.resumeView = ResumeView.TextualView(parent=self)\n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestRun/hide-resume-view' ) ):\n self.hideResumeView()\n\n self.graphView = GraphView.FlowChartView(parent=self)\n self.logsView = TextualView.TextualView2(parent=self)\n self.hexLogsView = DetailedView.DetailedView(parent=self)\n \n self.displayTab = QTabWidget()\n\n hSplitter = QSplitter(self)\n hSplitter.setOrientation(Qt.Vertical)\n\n hSplitter.addWidget( self.resumeView )\n hSplitter.addWidget( self.logsView )\n hSplitter.addWidget( self.hexLogsView )\n\n self.displayTab.addTab(hSplitter, self.tr('Events') )\n self.displayTab.addTab(self.graphView, self.tr('Diagram') )\n \n defaultTab = Settings.instance().readValue( key = 'TestRun/default-tab-run' )\n self.displayTab.setCurrentIndex(int(defaultTab)) \n \n self.currentEdit = QLineEdit()\n self.currentEdit.setReadOnly(True)\n self.currentEdit.setStyleSheet(\"QLineEdit { background-color : #F0F0F0; color: grey; }\")\n\n leftFrame = QFrame()\n leftLayout = QVBoxLayout()\n leftLayout.setContentsMargins(0, 0, 0, 0) \n leftFrame.setLayout(leftLayout)\n\n leftLayout.addWidget(self.currentEdit)\n leftLayout.addWidget(self.displayTab)\n\n v_splitter = QSplitter(self) \n v_splitter.addWidget( self.logsItem )\n v_splitter.addWidget( leftFrame )\n v_splitter.setStretchFactor(1, 1)\n\n 
layout.addWidget(v_splitter)\n \n self.setLayout(layout)", "def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def views(self):\r\n return Views(self)", "def window_handles(self):\n pass", "def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)", "def sort_views_by_relevance(self):\n window = sublime.active_window()\n\n # add the current view is the most relevant\n views = [self.view]\n try:\n # the second most relevant suggestions are from the indexed panels\n for panel_name in panel_state:\n panel = window.find_output_panel(panel_name)\n panel.file_name = lambda v=panel_name: v \n views.append(panel)\n except Exception as e:\n print('No panel', e)\n\n # the last but not least are the open views\n for view in window.views():\n if view is not self.view:\n views.append(view)\n\n return views" ]
[ "0.6691571", "0.6673106", "0.63949", "0.6238598", "0.6078048", "0.6038041", "0.5954232", "0.5949496", "0.5762432", "0.57136166", "0.56896406", "0.5647092", "0.557947", "0.5575854", "0.55426127", "0.5486439", "0.5435242", "0.543413", "0.5398227", "0.53780955", "0.53368187", "0.5304771", "0.52967066", "0.5291112", "0.52826357", "0.5240135", "0.52330905", "0.5222008", "0.5208226", "0.5186297" ]
0.8775787
0
Adds a view to a window.
def _add_view(self, window, view): # If no 'relative_to' is specified then the view is positioned # relative to the editor area. if len(view.relative_to) > 0: relative_to = window.get_view_by_id(view.relative_to) else: relative_to = None # Add the view to the window. window.add_view( view, view.position, relative_to, (view.width, view.height) ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n window.setParent(self)\n self.addSubWindow(window)\n window.show()\n window.restore_window()\n self._logger.debug(\"done\")", "def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)", "def add_window(self, window):\n if not self.valid_window(window):\n return False\n self.windows.append(window)\n window.tiler = self\n if window not in self.start_positions.keys():\n self.start_positions[window] = window.display_size\n\n rules = config.GET_RULES(window.classname)\n if rules is not None and re.search(rules[\"regex\"], window.title) is not None:\n if \"floating\" in rules:\n window.set_floating(rules[\"floating\"])\n if \"decorated\" in rules:\n if bool(rules[\"decorated\"]):\n window.enable_decoration()\n else:\n window.disable_decoration()\n if \"position\" in rules:\n window.move_to(tuple(rules[\"position\"]))\n\n print(\"Added window: {0}\".format(window))\n window.print_window_styles()\n return True", "def _addView(self, win, fn=None, noName=\"\", addNext=False, indexes=None):\n raise RuntimeError('Not implemented')", "def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)", "def create(self, window):\r\n\r\n # Set the size of the editor area.\r\n if self.editor_area_size != (-1, -1):\r\n window.editor_area_size = self.editor_area_size\r\n\r\n # If the perspective has specific contents then add just those.\r\n if len(self.contents) > 0:\r\n self._add_contents(window, self.contents)\r\n\r\n # Otherwise, add all of the views defined in the window at their\r\n # default positions realtive to the editor area.\r\n else:\r\n self._add_all(window)\r\n\r\n # Activate the first view in every region.\r\n window.reset_views()\r\n \r\n return", "def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)", "def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)", "def _add_all(self, window):\r\n\r\n for view in window.views:\r\n if view.visible:\r\n self._add_view(window, view)\r\n\r\n return", "def _add_view_to_menu(self, view):\n self._add_menu_item(MenuView(view.name, view), view.category)", "def _add_contents(self, window, contents):\r\n\r\n # If we are adding specific contents then we ignore any default view\r\n # visibility.\r\n #\r\n # fixme: This is a bit ugly! Why don't we pass the visibility in to\r\n # 'window.add_view'?\r\n for view in window.views:\r\n view.visible = False\r\n \r\n for item in contents:\r\n self._add_perspective_item(window, item)\r\n \r\n return", "def _add_perspective_item(self, window, item):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(item.relative_to) > 0:\r\n relative_to = window.get_view_by_id(item.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # fixme: This seems a bit ugly, having to reach back up to the\r\n # window to get the view. 
Maybe its not that bad?\r\n view = window.get_view_by_id(item.id)\r\n if view is not None:\r\n # fixme: This is probably not the ideal way to sync view traits\r\n # and perspective_item traits.\r\n view.style_hint = item.style_hint\r\n # Add the view to the window.\r\n window.add_view(\r\n view, item.position, relative_to, (item.width, item.height)\r\n )\r\n\r\n else:\r\n # The reason that we don't just barf here is that a perspective\r\n # might use views from multiple plugins, and we probably want to\r\n # continue even if one or two of them aren't present.\r\n #\r\n # fixme: This is worth keeping an eye on though. If we end up with\r\n # a strict mode that throws exceptions early and often for\r\n # developers, then this might be a good place to throw one ;^)\r\n logger.error('missing view for perspective item <%s>' % item.id)\r\n\r\n return", "def AppendWindow(self, window, sashPos=-1):\n self.InsertWindow(len(self._windows), window, sashPos)", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def add_view( *args, **kwargs ):", "def click_add():\n # TODO: 1. In frontend_script.py, create function \"create_window()\" that takes a Toplevel() as a parameter.\n # TODO: 2. 
In this file, implement the code below\n # new_window = Toplevel(root)\n # frontend_script.create_window(new_window)", "def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')", "def addMainWindow(self,appendToTask):\n self.appendToTask = appendToTask", "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def register_extra_window(self,\n instance: Union[Nobleman, Location],\n window: tk.Toplevel):\n self.extra_windows[instance.__class__][instance.id] = window", "def add_views(self, *args):\n for view in args:\n self.add_view(view)", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views[dataView] = None", "def add_view_pl_button(self):\n self.view_pl = QPushButton(\"View Playlist\")\n self.view_pl.clicked.connect(self.view_pl_btn_push)\n self.hbtnbox.addWidget(self.view_pl)", "def SetWindow(self, w):\r\n\r\n self.window = w", "def show(self, window):\r\n\r\n return", "def InsertWindow(self, idx, window, sashPos=-1):\n assert window not in self._windows, \"A window can only be in the splitter once!\"\n self._windows.insert(idx, window)\n self._sashes.insert(idx, -1)\n if not window.IsShown():\n window.Show()\n if sashPos != -1:\n self._pending[window] = sashPos\n self._checkRequestedSashPosition = False\n self._SizeWindows()", "def _add_element(self, element: Element, add_relationships: bool) -> ElementView:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n view = self.find_element_view(element=element)\n if view is None:\n view = ElementView(element=element)\n self.element_views.add(view)\n if add_relationships:\n self._add_relationships(element)\n return view", "def add(self, widget: Component) -> None:\n self._root.add(widget)" ]
[ "0.70577765", "0.6667131", "0.6417828", "0.63806814", "0.6346493", "0.630583", "0.62911326", "0.6290349", "0.6270349", "0.6236298", "0.6112068", "0.6045884", "0.60054344", "0.5985006", "0.5970879", "0.5946463", "0.5931123", "0.5889573", "0.58193064", "0.5818502", "0.57805985", "0.5762744", "0.57069254", "0.5694062", "0.5658514", "0.5638107", "0.56116116", "0.5589979", "0.5536084", "0.5535355" ]
0.8629264
0
When a required privilege is not even defined in the database, permission is denied; no crashing.
def test_requires_privilege_no_such(self): @requires_privilege('bomboozle', domain='zizzle') def view(request, *args, **kwargs): pass requestor_role = arbitrary.role() request = HttpRequest() request.role = requestor_role with self.assertRaises(PermissionDenied): view(request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DeniedPermissions(self) -> _n_6_t_0:", "def test_requires_privilege_denied(self):\n\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n\n request = HttpRequest()\n request.role = requestor_role.instantiate({})\n with self.assertRaises(PermissionDenied):\n view(request)", "def cmd_crash_private(self, argument):\n if self.is_admin:\n raise IndexError()\n else:\n self.logger.warning(\"User %s tried to use '%s' without being admin\" % (self.nick, \"crash\"))", "def test_requires_privilege_no_current_role(self):\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def no_reason(message, db):\n message.reply(Strings['GRANT_EXAMPLE'].format(db))", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def can(self, unused_perm):\n return False", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def permits(identity, obj, permission):\n return False", "def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_privilege_escalation\")", "def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_privilege_escalation\")", "def addPermissionIfMissing(session,perm,desc):\n try:\n db.getPerm(session,perm)\n return False\n except NoResultFound:\n session.add(db.makePermission(perm,desc))\n acserver.log(\"Authentication: Adding permission %s\"%perm)\n session.commit()\n return True", "def RequestedPermissions(self) -> _n_6_t_0:", "def _handle_privilege(self, msg: Message):\n for perm in msg[\"privilege\"][\"perms\"]:\n self.granted_privileges[perm[\"access\"]] = perm[\"type\"]\n log.debug(f\"Privileges: {self.granted_privileges}\")\n self.xmpp.event(\"privileges_advertised\")", "def is_allowed_to_do(cls, db_tuple, action, target, actor, should_raise_insufficent_priv_ex=True):\n action_check_fn = cls.get_action_check_fn(action)\n \n if action_check_fn is None:\n raise cls.UnrecognizedActionException('unrecognized action: %s' % action)\n \n # i do what i want!\n if actor.metaspace_privileges.has_privilege(MetaspacePrivilegeSet.SUPER):\n return True\n \n can_do_action = action_check_fn(db_tuple, target, actor)\n if should_raise_insufficent_priv_ex and not can_do_action:\n raise cls.InsufficientPrivilegesException('%s (user_id=%i) is not allowed to perform %s' % (actor.email_addr, actor.user_id, action))\n else:\n return can_do_action", "def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def grant_access_rw(message, db, 
reason):\n grant_sql_access(message, db, reason, False)", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def create_missing_perms(self) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset.connectors.sqla.models import SqlaTable\n from superset.models import core as models\n\n logger.info(\"Fetching a set of all perms to lookup which ones are missing\")\n all_pvs = set()\n for pv in self.get_session.query(self.permissionview_model).all():\n if pv.permission and pv.view_menu:\n all_pvs.add((pv.permission.name, pv.view_menu.name))\n\n def merge_pv(view_menu: str, perm: Optional[str]) -> None:\n \"\"\"Create permission view menu only if it doesn't exist\"\"\"\n if view_menu and perm and (view_menu, perm) not in all_pvs:\n self.add_permission_view_menu(view_menu, perm)\n\n logger.info(\"Creating missing datasource permissions.\")\n datasources = SqlaTable.get_all_datasources(self.get_session)\n for datasource in datasources:\n merge_pv(\"datasource_access\", datasource.get_perm())\n merge_pv(\"schema_access\", datasource.get_schema_perm())\n\n logger.info(\"Creating missing database permissions.\")\n databases = self.get_session.query(models.Database).all()\n for database in databases:\n merge_pv(\"database_access\", database.perm)", "def elevate_priv_if_needed(func):\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OSError as e:\n logger.debug('Elevating privileges due to receiving permission errror')\n logger.debug(e)\n return run_as_root(func)(*args, **kwargs)\n\n return inner", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def get_everyone_denied(self):", "def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)", "def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')", "def test_update_privilege_with_invalid_volume_size(self):\n\n # Create a tenant\n tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)\n\n # Create a privilege without volume size settings\n privilege = vim.vcs.storage.DatastoreAccessPrivilege()\n privilege.datastore = self.datastore\n privilege.allow_create = True\n\n # Add privilege to the tenant\n self.tenantMgr.AddPrivilege(tenant, privilege)\n\n # Update the privilege with invalid volume size\n with self.assertRaises(vmodl.fault.InvalidArgument):\n self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048, volume_total_size=1024)", "def OptionalPermissions(self) -> _n_6_t_0:", "def permissions():\n pass" ]
[ "0.67563957", "0.6332472", "0.6197399", "0.61634755", "0.6141772", "0.6102919", "0.6063586", "0.59873784", "0.5971427", "0.59551877", "0.58554024", "0.5852332", "0.5852332", "0.5848382", "0.580401", "0.57996124", "0.5798952", "0.57737225", "0.5743074", "0.57005423", "0.57005423", "0.56789786", "0.5615169", "0.56032574", "0.5601018", "0.55633444", "0.55598456", "0.5553271", "0.5534061", "0.55229115" ]
0.6504519
1
Parses a line from subnetdata.txt Returns None if the line did not contain network information. Otherwise it returns the attributes that are interesting to us as a dict.
def parse_line(self, line): # Format of subnetdata.txt: # - Fields are separated by tabs # - A field is a key/value pair, separated by a space # - The value of the DefaultRouters field is a comma-separated list of # IP addresses # - The value of the UDF field is a list of "<key>=<value>" pairs, # separated by ';' qipinfo = {} name = None location = None network_type = "unknown" side = "a" routers = [] compartment = None fields = line.split("\t") for field in fields: # The value may contain embedded spaces (key, value) = field.split(" ", 1) # Some fields contain structured data if key == "UDF": udf = {} for item in value.split(";"): (udfkey, udfvalue) = item.split("=", 1) udf[udfkey] = udfvalue value = udf qipinfo[key] = value # Sanity check if "SubnetId" not in qipinfo or "SubnetAddress" not in qipinfo or \ "SubnetMask" not in qipinfo: self.logger.info("WARNING: Line contains no network: %s" % line) return None if "SubnetName" in qipinfo: name = qipinfo["SubnetName"].strip().lower() if not name: name = qipinfo["SubnetAddress"] # Parse the network address/netmask address = IPv4Network(u"%s/%s" % (qipinfo["SubnetAddress"], qipinfo["SubnetMask"])) # Parse the list of routers if "DefaultRouters" in qipinfo: for addr in qipinfo["DefaultRouters"].split(","): routers.append(IPv4Address(text_type(addr))) if self.precreated_compartments_only and ("UDF" not in qipinfo or "COMPARTMENT" not in qipinfo["UDF"]): if not self.missing_compartments: self.logger.client_info("Missing network compartment info and " "precreated_compartments_only set to " "True, skipping these networks.") self.missing_compartments = True return None # Extract MS-specific information from the UDF field if "UDF" in qipinfo: if "LOCATION" in qipinfo["UDF"]: # Values in QIP sometimes contain spaces and mixed case syslocstr = qipinfo["UDF"]["LOCATION"].strip().lower() sysloc = syslocstr.split('.') if len(sysloc) >= 3: if sysloc[-3] in self.buildings: location = self.buildings[sysloc[-3]] else: # Do not make "refresh network --all" fail if a new # building does not exist in AQDB yet. Warn once for # every unknown sysloc we encounter. if syslocstr in self.unknown_syslocs: return None self.unknown_syslocs.add(syslocstr) self.logger.client_info("Unknown building code in sysloc " "%s, ignoring" % syslocstr) return None else: raise ValueError("Failed to parse LOCATION") if "BUCKET" in qipinfo["UDF"] and location: bucket = qipinfo["UDF"]["BUCKET"].strip().lower() bunker = bucket + "." 
+ location.name if bunker in self.bunkers: location = self.bunkers[bunker] if "TYPE" in qipinfo["UDF"]: network_type = qipinfo["UDF"]["TYPE"].strip().lower() if "SIDE" in qipinfo["UDF"]: side = qipinfo["UDF"]["SIDE"].strip().lower() if "COMPARTMENT" in qipinfo["UDF"]: compartment_name = qipinfo["UDF"]["COMPARTMENT"].strip().lower() if self.ignore_net_compartments and \ self.ignore_net_compartments.match(compartment_name): if compartment_name not in self.ignored_compartments: self.logger.client_info("Network compartment {} matches 'ignore_network_compartments_regex', " "skipping these networks.".format(compartment_name)) self.ignored_compartments.add(compartment_name) return None if compartment_name in self.compartments: compartment = self.compartments[compartment_name] elif self.precreated_compartments_only: if compartment_name not in self.unknown_compartments: self.logger.client_info("Unknown network compartment {} and " "precreated_compartments_only set to " "True, skipping these networks.".format(compartment_name)) self.unknown_compartments.add(compartment_name) return None elif compartment_name not in self.unknown_compartments: self.logger.client_info("Unknown compartment %s," " ignoring" % compartment_name) self.unknown_compartments.add(compartment_name) # FIXME: How to handle networks with no location? dsdb maps them to # sysloc "xx.ny.na", so mimic that for now if not location: if "xx" in self.buildings: location = self.buildings["xx"] else: # FIXME: the testsuite does not have the "xx" building return None return QIPInfo(name=name, address=address, location=location, network_type=network_type, side=side, routers=routers, compartment=compartment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_subnet_info(self, context):\n\n subnet = {}\n data = {}\n subnet_id = str(context.get('id', ''))\n data['subnet_id'] = subnet_id\n data['subnet_name'] = str(context.get('name', ''))\n data['tenant_id'] = str(context.get('tenant_id', ''))\n data['network_id'] = str(context.get('network_id', ''))\n data['ip_version'] = str(context.get('ip_version', ''))\n data['gateway_ip'] = str(context.get('gateway_ip', ''))\n ip_mask = str(context.get('cidr', ''))\n data['enable_dhcp'] = context.get('enable_dhcp', '')\n data['shared'] = context.get('shared', '')\n if subnet_id == '':\n LOG.error(_('Get creating subnet information failed'))\n return None\n data['network'], data['network_mask'] = ip_mask.split('/')\n\n context_str = json.dumps(data, sort_keys=True)\n data['md5sum'] = hashlib.md5(context_str).hexdigest()\n\n data['field_not_in_md5'] = ['md5sum']\n\n if subnet_id != '':\n subnet[subnet_id] = data\n return subnet", "def _parse_inet(line):\n tokens = line.split()\n return netaddr.IPNetwork(tokens[1])", "def _parse_head(line):\n retval = {}\n m = re.match(\n '[0-9]+: (?P<if>\\w+\\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',\n line\n )\n if m:\n retval['ifname'] = m.group('if')\n retval['mtu'] = int(m.group('mtu'))\n retval['flags'] = m.group('flags').split(',')\n return retval", "def parse_ip_addr(data):\n # 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000\n # link/ether 52:54:00:a0:b9:b6 brd ff:ff:ff:ff:ff:ff\n # inet 10.133.58.56/20 brd 10.133.63.255 scope global eth0\n # valid_lft 85266sec preferred_lft 85266sec\n # inet6 2001:1bc8:100:6::f301/64 scope global\n # valid_lft forever preferred_lft forever\n for iface in _IP_ADDR_SPLIT_RE.split(data.strip()):\n if not iface:\n continue\n lines = [l.strip() for l in iface.splitlines()]\n info = {\n \"name\": lines.pop(0).partition(\":\")[0],\n \"ip-addresses\": [],\n \"hardware-address\": None,\n }\n for line in lines:\n words = line.split()\n if words[0].startswith(\"link/\") and len(words) >= 2:\n info[\"hardware-address\"] = words[1]\n elif words[0] in (\"inet\", \"inet6\"):\n addrtype = \"ipv6\" if words[0] == \"inet6\" else \"ipv4\"\n addr, _, prefix = words[1].partition(\"/\")\n if prefix == '':\n prefix = 128 if addrtype == \"ipv6\" else 32\n info[\"ip-addresses\"].append({\"ip-address-type\": addrtype, \"ip-address\": addr, \"prefix\": int(prefix)})\n yield info", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def decode_lease(line: str = None) -> dict:\n if line is not None:\n logger.debug('Decoding non-JSON lease line: {}'.format(line))\n line = line.strip().split()\n return {\n 'mac-address': line[1],\n 'ip-address': line[2],\n }", "def get_subnet(self, subnet_id):\n LOG.debug(\"Get subnet %s\", subnet_id)\n\n if subnet_id not in self.subnets_by_id:\n return None\n\n data = self.subnets_by_id[subnet_id]\n LOG.debug(\"Subnet data: %s\", data)\n\n # Convert to form expected by NetModel.\n ip_version = 6 if ':' in data['cidr'] else 4\n subnet = 
{'enable_dhcp': True,\n 'ip_version': ip_version,\n 'cidr': data['cidr'],\n 'dns_nameservers': data.get('dns_servers') or [],\n 'id': subnet_id,\n 'gateway_ip': data['gateway_ip'],\n 'host_routes': data.get('host_routes', []),\n 'network_id': data.get('network_id', NETWORK_ID)}\n if ip_version == 6:\n subnet['ipv6_address_mode'] = DHCPV6_STATEFUL\n subnet['ipv6_ra_mode'] = DHCPV6_STATEFUL\n\n return dhcp.DictModel(subnet)", "def get_subnet_details(self, subnet_name=\"dummy_subnet\", subnet_id=None):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n if result is None:\n LOG_OBJ.error(\"No response from Server while getting subnets\")\n return result\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet details Failed with status %s \" %\n result.status)\n return result.status\n\n output = json.loads(result.data)\n\n for subnets in output['subnets']:\n if (subnet_id is not None and (subnets['id'] == subnet_id)) or\\\n subnets['name'].lower() == subnet_name.lower():\n LOG_OBJ.debug(\"Subnet Details: %s\" % subnets)\n return subnets\n\n LOG_OBJ.error(\"Subnet with name:%s or with id:%s is Not Found\" %\n (subnet_name, subnet_id))", "def get_network_info_dict(network):\n info_str = nx.info(network)\n lines = info_str.split('\\n')\n\n info_dict = {}\n for line in lines:\n pair = line.split(':')\n info_dict[pair[0]] = pair[1].strip()\n\n return info_dict", "def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)", "def parse_dhcp_stats(input: str) -> dict:\n found_pool = False\n found_dash = False\n\n stats = {}\n\n for line in input.split(\"\\n\"):\n line = line.strip().replace(\"\\x1b[m\", \"\")\n\n if not line:\n continue\n\n if not found_pool and line.startswith(\"pool\"):\n found_pool = True\n continue\n elif found_pool and found_dash is False and line.startswith(\"----\"):\n found_dash = True\n continue\n elif found_pool and found_dash:\n dhcp_stats = line.split()\n if len(dhcp_stats) != 4:\n continue\n stats[dhcp_stats[0]] = {\n \"size\": int(dhcp_stats[1]),\n \"used\": int(dhcp_stats[2]),\n \"avail\": int(dhcp_stats[3])\n }\n return stats", "def get_read_group_info(line, logger=default_logger):\n\n rg_dict = dict()\n #Initialize the dictionary, so we know if any fields are missing\n rg_dict[\"PI\"] = \"\"\n rg_dict[\"CN\"] = \"UNKNOWN\"\n rg_dict[\"ID\"] = \"\"\n rg_dict[\"PL\"] = \"UNKNOWN\"\n rg_dict[\"LB\"] = \"\"\n rg_dict[\"SM\"] = \"\"\n rg_dict[\"PU\"] = \"\"\n rg_dict[\"DT\"] = \"\"\n sline = line.split('\\t')\n\n for item in sline:\n item = item.strip()\n\n if(item.startswith(\"ID:\")):\n rg_dict[\"ID\"] = item[3:]\n elif(item.startswith(\"PL:\")):\n rg_dict[\"PL\"] = item[3:]\n elif(item.startswith(\"PU:\")):\n item = item.replace(\".\", \"_\") #to agree with ICGC SOP\n rg_dict[\"PU\"] = item[3:]\n elif(item.startswith(\"LB:\")):\n rg_dict[\"LB\"] = item[3:]\n elif(item.startswith(\"DT:\")):\n rg_dict[\"DT\"] = item[3:]\n elif(item.startswith(\"SM:\")):\n rg_dict[\"SM\"] = item[3:]\n elif(item.startswith(\"CN:\")):\n rg_dict[\"CN\"] = item[3:]\n 
elif(item.startswith(\"PI:\")):\n rg_dict[\"PI\"] = item[3:]\n else:\n pass\n\n for key,value in rg_dict.items():\n if value == \"\":\n logger.warning(\"missing RG field %s\" % key)\n\n return rg_dict", "def read_lnet_stats(f):\n ret = {'send_count': 0, 'recv_count': 0, 'send_length':0, 'recv_length': 0}\n\n pfile = os.path.normpath(f) + \"/stats\"\n with open(pfile, \"r\") as f:\n for line in f:\n chopped = line.split()\n if chopped[3]:\n ret[\"send_count\"] = int(chopped[3])\n if chopped[4]:\n ret[\"recv_count\"] = int(chopped[4])\n if chopped[7]:\n ret[\"send_length\"] = int(chopped[7])\n\t\tif chopped[8]:\n\t\t ret[\"recv_length\"] = int(chopped[8])\t\n \n\n if ret['send_count'] == 0 and ret['recv_count'] == 0 and ret['send_length'] == 0 and ret['recv_length'] == 0 :\n return None\n\n return ret", "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def _parse_interface(data):\n retval = dict(addresses=[])\n for line in data.split('\\n'):\n if line.startswith(' '):\n line = line.strip()\n if line.startswith('inet'):\n retval['addresses'].append(_parse_inet(line))\n elif 'link/ether' in line:\n retval['lladdr'] = _parse_lladdr(line)\n else:\n retval.update(_parse_head(line))\n\n return models.Interface.from_dict(retval)", "def extract_ip_from_line(self, line):\n\n ip = line.split()[1]\n try:\n ipaddress.ip_address(ip)\n except ValueError:\n if ip != \"*\":\n return None\n return ip", "def _parse_line(self):\n # check if line contains a rule or not\n stripped = self._line.strip()\n if not stripped or stripped.startswith(\"#\"):\n return None\n\n # strip out double quotes from values, and simplify equals strings\n simplified = self._line.replace(\"==\", \"=\").replace('\"', '')\n\n # return a dictionary formed from the key=value pairs found in line\n return dict(f.strip().split(\"=\", 1) for f in simplified.split(\",\"))", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _result_to_dict(line):\n f = line.split(':;')\n return {'server': f[0], 'os_name': f[1], 'status': f[2], 'ipv4': f[3]}", "def test_read_host_subnet(self):\n pass", "def filter_create_subnet_attributes(subnet, context):\n pass", "def _linux_parse(line, s):\n output_line = {}\n\n if line.startswith('PING '):\n s.ipv4 = 'bytes of data' in line\n\n if s.ipv4 and line[5] not in string.digits:\n s.hostname = True\n # fixup for missing hostname\n line = line[:5] + 'nohost' + line[5:]\n elif s.ipv4 and line[5] in string.digits:\n s.hostname = False\n elif not s.ipv4 and ' (' in line:\n s.hostname = True\n else:\n s.hostname = False\n\n if s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif s.ipv4 and s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif not s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n else:\n dst_ip, dta_byts = (3, 4)\n\n line = line.replace('(', ' ').replace(')', ' ')\n s.destination_ip = line.split()[dst_ip].lstrip('(').rstrip(')')\n s.sent_bytes = line.split()[dta_byts]\n\n return None\n\n if line.startswith('---'):\n s.footer = True\n return None\n\n if s.footer:\n if 'packets transmitted' in line:\n if ' duplicates,' in line:\n s.packets_transmitted = line.split()[0]\n 
s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[7].rstrip('%')\n s.duplicates = line.split()[5].lstrip('+')\n s.time_ms = line.split()[11].replace('ms', '')\n return None\n\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[5].rstrip('%')\n s.duplicates = '0'\n s.time_ms = line.split()[9].replace('ms', '')\n return None\n\n split_line = line.split(' = ')[1]\n split_line = split_line.split('/')\n output_line = {\n 'type': 'summary',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'packets_transmitted': s.packets_transmitted or None,\n 'packets_received': s.packets_received or None,\n 'packet_loss_percent': s.packet_loss_percent or None,\n 'duplicates': s.duplicates or None,\n 'time_ms': s.time_ms or None,\n 'round_trip_ms_min': split_line[0],\n 'round_trip_ms_avg': split_line[1],\n 'round_trip_ms_max': split_line[2],\n 'round_trip_ms_stddev': split_line[3].split()[0]\n }\n\n return output_line\n\n # ping response lines\n\n # request timeout\n if 'no answer yet for icmp_seq=' in line:\n timestamp = False\n isequence = 5\n\n # if timestamp option is specified, then shift icmp sequence field right by one\n if line[0] == '[':\n timestamp = True\n isequence = 6\n\n output_line = {\n 'type': 'timeout',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'icmp_seq': line.replace('=', ' ').split()[isequence]\n }\n\n return output_line\n\n # normal responses\n if ' bytes from ' in line:\n\n line = line.replace('(', ' ').replace(')', ' ').replace('=', ' ')\n\n # positions of items depend on whether ipv4/ipv6 and/or ip/hostname is used\n if s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n elif not s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif not s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n\n # if timestamp option is specified, then shift everything right by one\n timestamp = False\n if line[0] == '[':\n timestamp = True\n bts, rip, iseq, t2l, tms = (bts + 1, rip + 1, iseq + 1, t2l + 1, tms + 1)\n\n output_line = {\n 'type': 'reply',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'response_bytes': line.split()[bts],\n 'response_ip': line.split()[rip].rstrip(':'),\n 'icmp_seq': line.split()[iseq],\n 'ttl': line.split()[t2l],\n 'time_ms': line.split()[tms],\n 'duplicate': 'DUP!' 
in line\n }\n\n return output_line", "def _get_data(self):\n raw_data = self._get_raw_data()\n if not raw_data:\n return None\n result = {}\n for line in raw_data:\n if 'tcp' in line:\n parts = line.split()\n proto = parts[0]\n local_addr = parts[3]\n state = parts[5]\n ip, port = local_addr.rsplit(':', 1)\n port = str(port)\n result[port] = 1\n if state == 'LISTEN':\n if port not in self.charts['ports']:\n self.charts['ports'].add_dimension([port, port, 'absolute'])\n return result", "def _parse_line(self, line):\n with open(self._manifest.path, 'r') as manifest_file:\n if isinstance(line, str):\n assert line in self.BASE_INFORMATION.keys(), \\\n 'An attempt to get non-existent information from the manifest'\n for _ in range(self.BASE_INFORMATION[line]):\n fline = manifest_file.readline()\n return json.loads(fline)[line]\n else:\n assert self._index, 'No prepared index'\n offset = self._index[line]\n manifest_file.seek(offset)\n properties = manifest_file.readline()\n parsed_properties = ImageProperties(json.loads(properties))\n self._json_item_is_valid(**parsed_properties)\n return parsed_properties", "def parse_rating_dict(self, line):\n pass", "def read_neighnet(namefile):\n db = shelve.open(namefile)\n nif = db['nif']\n neighnet = db['neighnet']\n methodvalues = db['methodvalues']\n db.close()\n return nif, neighnet, methodvalues", "def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")", "def find_net_info(sfe):\n print(\"-\" * 20 + \" find_net_info started\")\n virt_net = sfe.list_virtual_networks()\n json_virt_net = virt_net.to_json()\n #pprint(json_virt_net)\n virt_mask = json_virt_net['virtualNetworks'][0]['netmask']\n svip = json_virt_net['virtualNetworks'][0]['svip']\n\n # Break the netmask into constituent octets to get the one that determines the host network\n mask_oct1 = int(virt_mask.split(\".\")[0])\n mask_oct2 = int(virt_mask.split(\".\")[1])\n mask_oct3 = int(virt_mask.split(\".\")[2])\n mask_oct4 = int(virt_mask.split(\".\")[3])\n\n # Return the octet that has the determining bits\n if mask_oct1 != 255:\n oct_pos = 0\n comp_oct = mask_oct1\n elif mask_oct2 != 255:\n oct_pos = 1\n comp_oct = mask_oct2\n elif mask_oct3 != 255:\n oct_pos = 2\n comp_oct = mask_oct3\n else:\n oct_pos = 3\n comp_oct = mask_oct4\n\n # Find the network block size\n comp_block = 256 - comp_oct \n\n # Find the SVIP host bits\n comp_svip = int(svip.split(\".\")[oct_pos])\n int_svip = int(comp_svip)\n return int_svip, comp_block, oct_pos", "def __process_address(self, address: Tuple[int, int, int, int, int]) -> Dict[str, int]:\n return {\n 'interface': address[0],\n 'protocol': address[1],\n 'type': address[2],\n 'hardware_type': address[3],\n 'address': address[4],\n }", "def get_nets_lacnic(self, response):\n\n nets = []\n\n # Iterate through all of the networks found, storing the CIDR value\n # and the start and end positions.\n for match in re.finditer(\n r'^(inetnum|inet6num|route):[^\\S\\n]+(.+?,[^\\S\\n].+|.+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n net = copy.deepcopy(BASE_NET)\n net_range = match.group(2).strip()\n\n try:\n\n net['range'] = net['range'] = '{0} - {1}'.format(\n ip_network(net_range)[0].__str__(),\n ip_network(net_range)[-1].__str__()\n ) if '/' in net_range else net_range\n\n except ValueError: # pragma: no cover\n\n net['range'] = net_range\n\n temp = []\n for addr in net_range.split(', '):\n\n count = addr.count('.')\n if count is not 0 and count < 4:\n\n addr_split = addr.strip().split('/')\n for i in range(count + 1, 
4):\n addr_split[0] += '.0'\n\n addr = '/'.join(addr_split)\n\n temp.append(ip_network(addr.strip()).__str__())\n\n net['cidr'] = ', '.join(temp)\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n\n except ValueError:\n\n pass\n\n return nets" ]
[ "0.5925786", "0.57917464", "0.5593678", "0.55182445", "0.55075026", "0.52525884", "0.5221545", "0.5205276", "0.5152222", "0.5098498", "0.50964147", "0.5092493", "0.50791293", "0.5054493", "0.5048382", "0.5023908", "0.49907443", "0.49807265", "0.4974957", "0.49546623", "0.49181387", "0.4830451", "0.47990164", "0.4793929", "0.47924137", "0.47854522", "0.47802255", "0.47798827", "0.47730005", "0.47726396" ]
0.70451194
0
Return a triangle strip Gouraudshaded based on values at each vertex.
def gouraud_triangle_strip(triangle_strip, vertex_vals, shape, accumulate=False, background=0): triangle_strip = numpy.asarray(triangle_strip, dtype=numpy.float32, order='C') vertex_vals = numpy.asarray(vertex_vals, dtype=numpy.float32, order='C') assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2 assert vertex_vals.ndim in (1, 2) unpack_out = False if vertex_vals.ndim == 1: vertex_vals = vertex_vals[:, numpy.newaxis] unpack_out = True assert len(vertex_vals) == len(triangle_strip) num_vertices = len(triangle_strip) out = numpy.empty(tuple(shape)+vertex_vals.shape[1:], dtype=numpy.float32, order='F') out.fill(background) _gouraud.gouraud_triangle_strip(num_vertices, _cast('float *', triangle_strip), _cast('float *', vertex_vals), _cast('float *', out), out.shape, out.strides, accumulate) if unpack_out: return out[:,:,0] else: return out.transpose((2,0,1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gouraud_triangles(triangle_strip, vertex_vals, shape):\n triangle_strip = numpy.asarray(triangle_strip)\n vertex_vals = numpy.asarray(vertex_vals)\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n unpack_out = False\n if vertex_vals.ndim == 1:\n vertex_vals = vertex_vals[:, numpy.newaxis]\n unpack_out = True\n assert len(vertex_vals) == len(triangle_strip)\n grid = numpy.indices(shape) + 0.5 # pixel centers are at (0.5, 0.5 geometrically)\n outputs = [numpy.zeros(shape) for i in range(vertex_vals.shape[1])]\n mask = numpy.zeros(shape, dtype=bool)\n for i in range(len(triangle_strip) - 2):\n vertices = triangle_strip[i:i+3]\n vals = vertex_vals[i:i+3]\n xmn, ymn = numpy.floor(vertices.min(axis=0)).astype(int)\n xmx, ymx = numpy.ceil(vertices.max(axis=0)).astype(int) + 1\n xs, ys = slice(xmn, xmx), slice(ymn, ymx)\n b_coords = barycentric_coords(vertices, grid[:, xs, ys])\n m = (b_coords >= 0).all(axis=0)\n mask[xs, ys] |= m\n b_m = b_coords[:, m]\n for j, out in enumerate(outputs):\n out[xs, ys][m] = vals[:, j].dot(b_m)\n if unpack_out:\n outputs = outputs[0]\n return mask, outputs", "def draw_triangle(tup):\n x, y, z = tup[0], tup[1], tup[2]\n t_draw = turtle.Turtle()\n for index in range(3):\n t_draw.forward()", "def triangles_list_to_triangles_strip(blender_mesh):\n # TODO: Fix changing of face orientation in some cases (see tests)\n edges_faces = {}\n current_strip = []\n strips = []\n joined_strips = []\n faces_indices = deque(p.index for p in blender_mesh.polygons)\n done_faces_indices = set()\n current_face_index = faces_indices.popleft()\n process_faces = True\n\n for polygon in blender_mesh.polygons:\n for edge in polygon.edge_keys:\n edges_faces.setdefault(edge, set()).add(polygon.index)\n\n while process_faces:\n current_face = blender_mesh.polygons[current_face_index]\n current_face_verts = current_face.vertices[:]\n strip_indices = [v for v in current_face_verts if v not in current_strip[-2:]]\n if current_strip:\n face_to_add = tuple(current_strip[-2:]) + tuple(strip_indices)\n if face_to_add != current_face_verts and face_to_add != tuple(reversed(current_face_verts)):\n # we arrived here because the current face shares and edge with the face in the strip\n # however, if we just add the verts, we would be changing the direction of the face\n # so we create a degenerate triangle before adding to it to the strip\n current_strip.append(current_strip[-2])\n current_strip.extend(strip_indices)\n done_faces_indices.add(current_face_index)\n\n next_face_index = None\n possible_face_indices = {}\n for edge in current_face.edge_keys:\n if edge not in edges_faces:\n continue\n checked_edge = {face_index: edge for face_index in edges_faces[edge]\n if face_index != current_face_index and face_index not in done_faces_indices}\n possible_face_indices.update(checked_edge)\n for face_index, edge in possible_face_indices.items():\n if not current_strip:\n next_face_index = face_index\n break\n elif edge == tuple(current_strip[-2:]) or edge == tuple(reversed(current_strip[-2:])):\n next_face_index = face_index\n break\n elif edge == (current_strip[-1], current_strip[-2]):\n if len(current_strip) % 2 != 0:\n # create a degenerate triangle to join them\n current_strip.append(current_strip[-2])\n next_face_index = face_index\n\n if next_face_index:\n faces_indices.remove(next_face_index)\n current_face_index = next_face_index\n else:\n strips.append(current_strip)\n current_strip = []\n try:\n current_face_index = faces_indices.popleft()\n 
except IndexError:\n process_faces = False\n\n prev_strip_len = 0\n # join strips with degenerate triangles\n for strip in strips:\n if not prev_strip_len:\n joined_strips.extend(strip)\n prev_strip_len = len(strip)\n elif prev_strip_len % 2 == 0:\n joined_strips.extend((joined_strips[-1], strip[0]))\n joined_strips.extend(strip)\n prev_strip_len = len(strip)\n else:\n joined_strips.extend((joined_strips[-1], strip[0], strip[0]))\n joined_strips.extend(strip)\n prev_strip_len = len(strip)\n\n return joined_strips", "def accumulate_triangles(triangle_strip, shape):\n triangle_strip = numpy.asarray(triangle_strip)\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n grid = numpy.indices(shape) + 0.5 # pixel centers are at (0.5, 0.5 geometrically)\n output = numpy.zeros(shape, dtype=int)\n for i in range(len(triangle_strip) - 2):\n vertices = triangle_strip[i:i+3]\n xmn, ymn = numpy.floor(vertices.min(axis=0)).astype(int)\n xmx, ymx = numpy.ceil(vertices.max(axis=0)).astype(int) + 1\n xs, ys = slice(xmn, xmx), slice(ymn, ymx)\n b_coords = barycentric_coords(vertices, grid[:, xs, ys])\n m = (b_coords >= 0).all(axis=0)\n output[xs, ys] += (b_coords >= 0).all(axis=0)\n return output", "def triangles_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n tris = [int(v) for v in self.triangles.split(',')]\n data = []\n for i in xrange(0, len(tris), 3):\n v0 = 2 * tris[i]\n v1 = 2 * tris[i + 1]\n v2 = 2 * tris[i + 2]\n data.append(u\"M%s,%sL%s,%sL%s,%sz\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n verts[v2], verts[v2 + 1],\n ))\n return u\"\".join(data)", "def mask_triangle_strip(triangle_strip, shape):\n triangle_strip = numpy.asarray(triangle_strip, dtype=numpy.float32, order='C')\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n num_vertices = len(triangle_strip)\n out = numpy.zeros(tuple(shape), dtype=bool, order='F')\n _gouraud.mask_triangle_strip(num_vertices,\n _cast('float *', triangle_strip),\n _cast('char *', out),\n out.shape, out.strides)\n return out", "def triangleAdjacency( gen ):\n \n numTri = int( numberOfNodes( gen ) / 3. 
)\n \n return [ ( 3*i+j, 3*i+((j+1)%3) ) for j in range(3) for i in range(numTri) ]", "def triangle_contour(x_center, y_center, values, smoothing, ckwargs={}):\n # make Triangulation object using the centers of each of the hexbins\n triag = Triangulation(x_center, y_center)\n refiner = UniformTriRefiner(triag) # refines the mesh of triangle\n # returns refines triangle field of triangles and interpolated\n # contour values by dividing each triangle into 4**subdiv triangles\n tri_refi, c_refi = refiner.refine_field(values, subdiv=smoothing)\n T = pl.tricontour(tri_refi, c_refi, **ckwargs)\n return T", "def triangle(self):\n [r,c] = self.D\n m = min(r,c)\n S = self\n T = zeros(r,c)\n while m > 0:\n NoLigne = 0\n while S[NoLigne, 0] == 0 and (NoLigne < m - 1):\n NoLigne += 1\n S = S.swap(NoLigne,0)\n if S[0, 0] != 0:\n pivot = S[0,0]\n for k in range(1,m):\n if S[k,0] != 0:\n S = S.comb_lignes(pivot, -S[k,0],k,0)\n #print(\"pivot = \"+str(pivot))\n #print(\"S dans for :\")\n #print(S)\n T = T.remplace_ligned(r - m,S.F)\n #print(\"Évolution de T :\")\n #print(T)\n S = S.decoupe()\n m -= 1\n return T", "def triangle(self, freq: int, /) -> None:", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4,b-4,c-4)\n for (a,b,c) in self.triangles if a > 3 and b > 3 and c > 3]", "def create_square_triangle_mesh():\n vertices = np.array(\n ((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)),\n dtype=np.float32)\n faces = np.array(\n ((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32)\n return vertices, faces", "def drawTriangle(t, color, x, y):\n ## t.color(color)\n ## t.begin_fill()\n for i in range(3):\n t.forward(x)\n t.right(y)", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]", "def triangle(row):\n if len(row) == 0:\n raise ValidationError(\"Row empty\")\n if len(row) == 1:\n return row\n if len(row) < 8:\n return small_triangles(row)\n make_steps()\n while len(row) > 50:\n streams = defaultdict(list)\n j = 0\n streams[j] = list(row)\n for i in range(len(row) - 4):\n for j in range(10):\n step = j * 4\n if i >= step:\n streams[j + 1].append(do_row(streams[j], i - step))\n row = streams[j]\n return small_triangles(row)", "def copy_triangle(tri):\n return [[peg for peg in row] for row in tri]", "def yield_equilateral_triangles(cls):\n for i in range(1, 201):\n yield i-.5, i-.5, i-.5\n yield i, i, i", "def triadic(R, G, B):\r\n RGB = [(R/255), (G/255), (B/255)]\r\n HLS = colorsys.rgb_to_hls(RGB[0], RGB[1], RGB[2])\r\n HLS_1 = [((((HLS[0]*360) + 120) % 360)/360), HLS[1], HLS[2]]\r\n HLS_2 = [((((HLS[0]*360) - 120) % 360)/360), HLS[1], HLS[2]]\r\n RGB_1 = colorsys.hls_to_rgb(HLS_1[0], HLS_1[1], HLS_1[2])\r\n RGB_2 = colorsys.hls_to_rgb(HLS_2[0], HLS_2[1], HLS_2[2])\r\n return [RGB255(RGB_1), RGB255(RGB_2)]", "def triangulate(self):\n npts = self._vertices.shape[0]\n if np.any(self._vertices[0] != self._vertices[1]):\n # start != end, so edges must wrap around to beginning.\n edges = np.empty((npts, 2), dtype=np.uint32)\n edges[:, 0] = np.arange(npts)\n edges[:, 1] = edges[:, 0] + 1\n edges[-1, 1] = 0\n else:\n # start == end; no wrapping required.\n edges = np.empty((npts-1, 2), dtype=np.uint32)\n edges[:, 0] = np.arange(npts)\n edges[:, 1] = edges[:, 0] + 1\n\n tri = Triangulation(self._vertices, edges)\n tri.triangulate()\n return tri.pts, tri.tris", "def defgrads(verts_src, verts_deformed, tris):\n S0 
= get_triangle_frames(verts_src, tris)\n S1 = get_triangle_frames(verts_deformed, tris)\n return matmat(S1, inv3(S0))", "def create_intermediate_triangle(source_tri, target_tri, alpha):\n return ((get_point_in_segment(source_tri[0], target_tri[0], alpha)),\n (get_point_in_segment(source_tri[1], target_tri[1], alpha)),\n (get_point_in_segment(source_tri[2], target_tri[2], alpha)))", "def drawTwoTriangles():\n\n drawTriangle(200,100,\"blue\",\"pink\")\n Lucia.up()\n Lucia.forward(220)\n Lucia.down()\n drawTriangle(100,200,\"grey\",\"blue\")\n Lucia.seth(0)", "def tri(x, y, colour, invert=0, a=100):\n x *= cell_size\n y *= cell_size\n\n noStroke()\n fill(colour[0], colour[1], colour[2], a)\n if invert == 1: # top right\n triangle(x, y, x + cell_size, y, x + cell_size, y + cell_size)\n cx, cy = cell_size + x - in_x, y + in_y\n elif invert == 2: # bottom right\n triangle(x + cell_size, y, x + cell_size, y + cell_size, x, y + cell_size)\n cx, cy = cell_size + x - in_x, cell_size + y - in_y\n elif invert == 3: # bottom left\n triangle(x, y, x + cell_size, y + cell_size, x, y + cell_size)\n cx, cy = x + in_x, cell_size + y - in_y\n else: # top left\n triangle(x, y, x + cell_size, y, x, y + cell_size)\n cx, cy = x + in_x, y + in_y\n \n # randomly draw a circle inside some triangles?\n # calculate x/y distance from corner\n # maybe randomly draw the incircle in the space not filled by the triangle\n \n if randint(0, 100) > 97 and incircles:\n fill(random(360), 50, 90, 100)\n circle(cx, cy, int(in_x*2))", "def triangle(self):\n \n R = Householder.triangle_operation(self)[0] \n \n return(R.round(10))", "def lighter(clr, f=1/3):\n gaps = [f*(1 - val) for val in clr]\n new_clr = [val + gap for gap, val in zip(gaps, clr)]\n return new_clr", "def draw_triangle(vertices, shape):\n # add 0.5 to account for fact that pixel centers are at (0.5, 0.5)\n barycenters = barycentric_coords(vertices, numpy.indices(shape) + 0.5)\n return (barycenters >= 0).all(axis=0)", "def format_triangle(triangle):\n rows = (tuple(int(num) for num in row.strip().split(' '))\n for row in triangle.split('\\n') if row)\n return reversed(tuple(rows))", "def split_triangles(mesh):\n triangles = np.asarray(mesh.triangles).copy()\n vertices = np.asarray(mesh.vertices).copy()\n\n triangles_3 = np.zeros_like(triangles)\n vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)\n\n for index_triangle, t in enumerate(triangles):\n index_vertex = index_triangle * 3\n vertices_3[index_vertex] = vertices[t[0]]\n vertices_3[index_vertex + 1] = vertices[t[1]]\n vertices_3[index_vertex + 2] = vertices[t[2]]\n\n triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)\n\n mesh_return = deepcopy(mesh)\n mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)\n mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)\n mesh_return.triangle_normals = mesh.triangle_normals\n mesh_return.paint_uniform_color([0.5, 0.5, 0.5])\n return mesh_return", "def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'", "def zigzag_upper_triangle(n_values):\n x_indices = [0]\n x_max = 1\n while len(x_indices) < n_values:\n indices = list(range(x_max + 1))\n x_indices += indices + [x_max + 1] + indices[::-1]\n x_max += 2\n\n y_indices = []\n y_max = 0\n while len(y_indices) < n_values:\n indices = list(range(y_max + 1))\n y_indices 
+= indices + [y_max + 1] + indices[::-1]\n y_max += 2\n \n return x_indices[:n_values], y_indices[:n_values]" ]
[ "0.70117754", "0.59974504", "0.58845705", "0.58478296", "0.58071446", "0.57811844", "0.5730023", "0.5624095", "0.56165123", "0.56160176", "0.5601126", "0.555866", "0.55583763", "0.5546502", "0.5501326", "0.5475135", "0.5457648", "0.5451304", "0.54182714", "0.5414708", "0.53949195", "0.5377671", "0.5346236", "0.5335551", "0.53354317", "0.531471", "0.52989894", "0.5290146", "0.5285783", "0.5262841" ]
0.7244856
0
Return a triangle strip rasterized into a boolean mask. Mask is guaranteed to be identical to the region drawn by gouraud_triangle_strip, which is not necessarily exactly the case for draw_mask() (which uses a slightly different algorithm internally).
def mask_triangle_strip(triangle_strip, shape):
    triangle_strip = numpy.asarray(triangle_strip, dtype=numpy.float32, order='C')
    assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2
    num_vertices = len(triangle_strip)
    out = numpy.zeros(tuple(shape), dtype=bool, order='F')
    _gouraud.mask_triangle_strip(num_vertices, _cast('float *', triangle_strip),
                                 _cast('char *', out), out.shape, out.strides)
    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gouraud_triangles(triangle_strip, vertex_vals, shape):\n triangle_strip = numpy.asarray(triangle_strip)\n vertex_vals = numpy.asarray(vertex_vals)\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n unpack_out = False\n if vertex_vals.ndim == 1:\n vertex_vals = vertex_vals[:, numpy.newaxis]\n unpack_out = True\n assert len(vertex_vals) == len(triangle_strip)\n grid = numpy.indices(shape) + 0.5 # pixel centers are at (0.5, 0.5 geometrically)\n outputs = [numpy.zeros(shape) for i in range(vertex_vals.shape[1])]\n mask = numpy.zeros(shape, dtype=bool)\n for i in range(len(triangle_strip) - 2):\n vertices = triangle_strip[i:i+3]\n vals = vertex_vals[i:i+3]\n xmn, ymn = numpy.floor(vertices.min(axis=0)).astype(int)\n xmx, ymx = numpy.ceil(vertices.max(axis=0)).astype(int) + 1\n xs, ys = slice(xmn, xmx), slice(ymn, ymx)\n b_coords = barycentric_coords(vertices, grid[:, xs, ys])\n m = (b_coords >= 0).all(axis=0)\n mask[xs, ys] |= m\n b_m = b_coords[:, m]\n for j, out in enumerate(outputs):\n out[xs, ys][m] = vals[:, j].dot(b_m)\n if unpack_out:\n outputs = outputs[0]\n return mask, outputs", "def polygon_to_mask_array(dims: tuple, vertices: CoordinatePair) -> np.ndarray:\n\n poly_vertices = [\n (vertices.x_ul, vertices.y_ul),\n (vertices.x_ul, vertices.y_br),\n (vertices.x_br, vertices.y_br),\n (vertices.x_br, vertices.y_ul),\n ]\n\n img = PIL.Image.new(\"L\", dims, 0)\n PIL.ImageDraw.Draw(img).polygon(poly_vertices, outline=1, fill=1)\n return np.array(img).astype(bool)", "def get_mask(self, shape):\n h, w = shape[0:2]\n y, x = np.mgrid[:h, :w]\n points = np.transpose((x.ravel(), y.ravel()))\n\n mask = _nxutils_points_inside_poly(points, self.verts)\n #mask = nxutils.points_inside_poly(points, self.verts)\n return mask.reshape(h, w)", "def as_boolean_mask(self):\n bbox = self.bbox()\n zs = np.unique([c.image_z_position for c in self.contours])\n z_to_index = dict(zip(zs,range(len(zs))))\n\n # Get dimensions, initialize mask.\n nx,ny = np.diff(bbox[:2], axis=1).astype(int) + 1\n nx = int(nx); ny = int(ny)\n nz = int(zs.shape[0])\n mask = np.zeros((nx,ny,nz), dtype=np.bool)\n\n # We check if these points are enclosed within each contour \n # for a given slice. 
`test_points` is a list of image coordinate \n # points, offset by the bounding box.\n test_points = bbox[:2,0] + np.c_[ np.where(~mask[:,:,0]) ]\n\n # First we \"turn on\" pixels enclosed by inclusion contours.\n for contour in self.contours:\n if contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels\n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n contains_pts = path.contains_points(test_points)\n mask[:,:,zi] = contains_pts.reshape(mask.shape[:2])\n\n # Second, we \"turn off\" pixels enclosed by exclusion contours.\n for contour in self.contours:\n if not contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n path = mplpath.Path(contour_matrix, closed=True)\n not_contains_pts = ~path.contains_points(test_points)\n not_contains_pts = not_contains_pts.reshape(mask.shape[:2])\n mask[:,:,zi] = np.logical_and(mask[:,:,zi], not_contains_pts)\n\n # The first and second axes have to \n # be swapped because of the reshape.\n return mask.swapaxes(0,1), bbox[[1,0,2]]", "def get_vertices_mask(poly, mask):\n h = mask.shape[0]\n w = mask.shape[1]\n gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)\n gt_poly[:,0] = np.floor(poly[:,0]*w)\n gt_poly[:,1] = np.floor(poly[:,1]*h)\n\n mask[gt_poly[:, 1], gt_poly[:, 0]] = 1.0\n\n return mask", "def generate_inpaint_mask(n_samples, n_colors, spatial_width):\n mask = np.zeros((n_samples, n_colors, spatial_width, spatial_width), dtype=bool)\n # simple mask -- just mask out half the image\n mask[:,:,:,spatial_width/2:] = True\n return mask.ravel()", "def get_mask(total, begin, end):\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)", "def accumulate_triangles(triangle_strip, shape):\n triangle_strip = numpy.asarray(triangle_strip)\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n grid = numpy.indices(shape) + 0.5 # pixel centers are at (0.5, 0.5 geometrically)\n output = numpy.zeros(shape, dtype=int)\n for i in range(len(triangle_strip) - 2):\n vertices = triangle_strip[i:i+3]\n xmn, ymn = numpy.floor(vertices.min(axis=0)).astype(int)\n xmx, ymx = numpy.ceil(vertices.max(axis=0)).astype(int) + 1\n xs, ys = slice(xmn, xmx), slice(ymn, ymx)\n b_coords = barycentric_coords(vertices, grid[:, xs, ys])\n m = (b_coords >= 0).all(axis=0)\n output[xs, ys] += (b_coords >= 0).all(axis=0)\n return output", "def gouraud_triangle_strip(triangle_strip, vertex_vals, shape, accumulate=False, background=0):\n triangle_strip = numpy.asarray(triangle_strip, dtype=numpy.float32, order='C')\n vertex_vals = numpy.asarray(vertex_vals, dtype=numpy.float32, order='C')\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n assert vertex_vals.ndim in (1, 2)\n unpack_out = False\n if vertex_vals.ndim == 1:\n vertex_vals = vertex_vals[:, numpy.newaxis]\n unpack_out = True\n assert len(vertex_vals) == len(triangle_strip)\n num_vertices = len(triangle_strip)\n out = 
numpy.empty(tuple(shape)+vertex_vals.shape[1:], dtype=numpy.float32, order='F')\n out.fill(background)\n _gouraud.gouraud_triangle_strip(num_vertices,\n _cast('float *', triangle_strip),\n _cast('float *', vertex_vals),\n _cast('float *', out),\n out.shape, out.strides, accumulate)\n if unpack_out:\n return out[:,:,0]\n else:\n return out.transpose((2,0,1))", "def get_arrow_mask(self, frame):\n # Adapte contrast and put in grayscale\n img = skimage.exposure.equalize_adapthist(frame)\n img = skimage.color.rgb2gray(img)\n # # apply Otsu thresholding method\n thres = skimage.filters.threshold_otsu(img)\n mask = np.where(img < skimage.filters.threshold_otsu(img), True, False)\n # Morphological cleaning\n mask = skimage.morphology.binary_opening(mask, skimage.morphology.disk(5))\n # Remove corners of room\n mask = skimage.segmentation.flood_fill(mask, (0,0), 0)\n mask = skimage.segmentation.flood_fill(mask, (0,mask.shape[1]-1), 0)\n mask = skimage.segmentation.flood_fill(mask, (mask.shape[0]-1,0), 0)\n mask = skimage.segmentation.flood_fill(mask, (mask.shape[0]-1,mask.shape[1]-1), 0)\n\n return mask", "def draw_triangle(vertices, shape):\n # add 0.5 to account for fact that pixel centers are at (0.5, 0.5)\n barycenters = barycentric_coords(vertices, numpy.indices(shape) + 0.5)\n return (barycenters >= 0).all(axis=0)", "def test_make_mask(self):\n output_mask = footprint_mask(os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def mask_glare(img, threshold=175, mask_only=False):\n # if img.dtype is floating but threshold value is still the default\n # this could be generalized\n if np.issubdtype(img.dtype, np.floating) and (threshold == 175):\n threshold = 175 / 255\n # region to inpaint\n inp = (img > threshold)\n\n # get a larger area around the specks\n inp = binary_dilation(inp, selem=disk(2))\n\n # remove anything large\n #inp = white_tophat(inp, selem=disk(3))\n\n if mask_only:\n return inp\n else:\n # both the original background *and* these new glared regions\n # are masked\n return ma.masked_array(img, mask=inp)", "def geometry_mask(self, gdf, all_touched=False, invert=False, **kwargs):\n gdf1 = gdf.copy()\n gdf1[\"mask\"] = np.full(gdf.index.size, (not invert), dtype=np.uint8)\n da_out = self.rasterize(\n gdf1,\n col_name=\"mask\",\n all_touched=all_touched,\n nodata=np.uint8(invert),\n **kwargs,\n )\n # remove nodata value before converting to boolean\n da_out.attrs.pop(\"_FillValue\", None)\n return da_out.astype(bool)", "def create_region_mask(latitude_array, target_shape, lat_bounds):\n\n target_ndim = len(target_shape)\n\n southern_lat, northern_lat = lat_bounds\n mask_array = numpy.where((latitude_array >= southern_lat) & (latitude_array < northern_lat), False, True)\n\n mask = uconv.broadcast_array(mask_array, [target_ndim - 2, target_ndim - 1], target_shape)\n assert mask.shape == target_shape \n\n return mask", "def create_mask(shape):\n return np.zeros(shape).astype(bool)", "def shp_mask(shp, x, y, m=None):\n\n if isinstance(shp, (tuple, list, np.ndarray)):\n shp = Polygon(shp)\n\n rect = _bbox_to_rect(_grid_bbox(x, y))\n\n if m is None:\n m = np.zeros((y.size, x.size), dtype=bool)\n\n if not shp.intersects(rect):\n m[:] = False\n elif shp.contains(rect):\n m[:] = 
True\n\n else:\n i, j = m.shape\n\n if i == 1 and j == 1:\n m[:] = shp.contains(Point(x[0], y[0]))\n\n elif i == 1:\n m[:, :j//2] = shp_mask(shp, x[:j//2], y, m[:, :j//2])\n m[:, j//2:] = shp_mask(shp, x[j//2:], y, m[:, j//2:])\n\n elif j == 1:\n m[:i//2] = shp_mask(shp, x, y[:i//2], m[:i//2])\n m[i//2:] = shp_mask(shp, x, y[i//2:], m[i//2:])\n\n else:\n m[:i//2, :j//2] = shp_mask(shp,\n x[:j//2], y[:i//2], m[:i//2, :j//2])\n m[:i//2, j//2:] = shp_mask(shp,\n x[j//2:], y[:i//2], m[:i//2, j//2:])\n m[i//2:, :j//2] = shp_mask(shp,\n x[:j//2], y[i//2:], m[i//2:, :j//2])\n m[i//2:, j//2:] = shp_mask(shp,\n x[j//2:], y[i//2:], m[i//2:, j//2:])\n\n return m", "def triuind(x):\n return _band_part_inverted(x, -1, 0)", "def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:\n mask = (torch.triu(torch.ones(sz, sz, device=device)) == 1).transpose(0, 1)\n mask = (\n mask.float()\n .masked_fill(mask == 0, float(\"-inf\"))\n .masked_fill(mask == 1, float(0.0))\n )\n return mask", "def mask2trimap(self, mask):\n fg_mask = (mask > 0).float()\n bg_mask = (mask < 0).float()\n trimap_width = getattr(self.opt, 'trimap_width', 20)\n trimap_width *= bg_mask.shape[-1] / self.opt.width\n trimap_width = int(trimap_width)\n bg_mask = cv2.erode(bg_mask.numpy(), kernel=np.ones((trimap_width, trimap_width)), iterations=1)\n bg_mask = torch.from_numpy(bg_mask)\n mask = fg_mask - bg_mask\n return mask", "def _prep_mask(dataset, trial_split):\n split_to_mask = lambda x: (dataset.trial_info.split == x) if isinstance(x, str) else x\n if isinstance(trial_split, list):\n trial_mask = np.any([split_to_mask(split) for split in trial_split], axis=0)\n else:\n trial_mask = split_to_mask(trial_split)\n return trial_mask", "def test_make_outer_mask_from_fp(self):\n fp_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n output_mask = boundary_mask(fp_mask, boundary_type=\"outer\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_b_mask_outer.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def subsequent_mask(size: int) -> Tensor:\n mask = np.triu(np.ones((1, size, size)), k=1).astype(\"uint8\")\n return torch.from_numpy(mask) == 0", "def get_mask(self):\n w, h = self.rect.w, self.rect.h\n colorkey = (0, 0, 0)\n surface = pg.Surface((w, h))\n surface.set_colorkey(colorkey)\n # fill the surface with the spherical object\n color, center, radius = (255, 255, 255), self.rect.center, round(self.rect.w/2)\n pg.draw.circle(surface, color, center, radius)\n mask = pg.mask.from_surface(surface)\n return mask", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\n segmentation = [np.clip(i,0.0,i).tolist() for i in segmentation]\n polygons.append(segmentation)\n\n return polygons", "def gen_background_mask( img ):\n\t\t\n\tif len( img.shape ) == 3: t = img[0]\n\telif len( img.shape ) == 2: t = img\n\n\tmask = img > filters.threshold_li(t)\n\n\treturn mask", "def boundary_triangles(TRI, 
boundary):\n # Look for triangles in TRI that contain 2 elements on the boundary\n # (ie they have a boundary edge in the triangle)\n inb0 = np.where(np.in1d(TRI[:, 0], boundary))[0]\n inb1 = np.where(np.in1d(TRI[:, 1], boundary))[0]\n inb2 = np.where(np.in1d(TRI[:, 2], boundary))[0]\n inb_all = np.hstack((inb0, inb1, inb2)).ravel()\n # print 'inb_all = ', inb_all\n\n # Look for indices that appear twice in cat( inb0,inb1,inb2).\n s = np.sort(inb_all, axis=None)\n btris = s[s[1:] == s[:-1]]\n\n # If any values are repeated in btri, that means all three vertices are boundary.\n # Keep these. Also, remove from the list any tris that share two points with one of these tris.\n # --> this is because this means an edge (not a boundary edge) connects two boundary particles,\n # and cuts off another particle.\n btri_repeats = btris[btris[1:] == btris[:-1]]\n # print 'TRI = ', TRI\n # print 'btris = ', btris\n # print 'btri_repeats = ', btri_repeats\n\n # btri = np.setdiff1d(btris,btri_repeats)\n btris = np.unique(btris)\n\n # If any btri triangles share an edge with a btri_repeats (they share 2 points),\n # kill the btri triangle.\n mask = np.ones(len(btris), dtype=bool)\n for ii in range(len(btris)):\n # if this one isn't itself a repeat, check against all brtri_repeats\n if not np.in1d(btris[ii], btri_repeats):\n tri0 = TRI[btris[ii]]\n for btr in btri_repeats:\n tri1 = TRI[btr]\n if len(np.intersect1d(tri0, tri1, assume_unique=True)) > 1:\n # print 'matching = ', np.intersect1d(tri0,tri1,assume_unique=True)\n mask[ii] = False\n btri = btris[mask]\n\n return btri", "def remove_shadow(patch):\r\n lt = not np.any(patch[0,0])\r\n rt = not np.any(patch[0,-1])\r\n lb = not np.any(patch[-1,0])\r\n rb = not np.any(patch[-1,-1])\r\n\r\n return lt or rt or lb or rb", "def binary_mask_to_polygon(binary_mask, tolerance=0):\r\n\r\n polygons = []\r\n if isinstance(binary_mask, torch.Tensor):\r\n binary_mask = binary_mask.cpu().numpy()\r\n # pad mask to close contours of shapes which start and end at an edge\r\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\r\n contours = measure.find_contours(padded_binary_mask, 0.5)\r\n contours = np.subtract(contours, 1)\r\n for contour in contours:\r\n contour = close_contour(contour)\r\n contour = measure.approximate_polygon(contour, tolerance)\r\n if len(contour) < 3:\r\n continue\r\n contour = np.flip(contour, axis=1) # x, y\r\n polygon = np.maximum(contour, 0)\r\n #segmentation = contour.ravel().tolist()\r\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\r\n #segmentation = [0 if i < 0 else i for i in segmentation]\r\n polygons.append(polygon)\r\n\r\n return polygons" ]
[ "0.660964", "0.572978", "0.56846446", "0.56614095", "0.5645633", "0.5615826", "0.56016004", "0.55722696", "0.5571411", "0.5390756", "0.5227696", "0.52020264", "0.5185661", "0.5173398", "0.5168185", "0.51425934", "0.5140048", "0.513411", "0.51229304", "0.5054843", "0.504088", "0.5015828", "0.50002086", "0.4993942", "0.49938497", "0.4989856", "0.4989756", "0.49805647", "0.49788535", "0.49753982" ]
0.77402407
0
We don't really care how this happens as long as the worker winds up with a worker pool with a correct base_job_template when creating a new work pool
async def test_base_worker_gets_job_configuration_when_syncing_with_backend_with_job_config_and_variables(
    session, client
):
    class WorkerJobConfig(BaseJobConfiguration):
        other: Optional[str] = Field(template="{{ other }}")

    class WorkerVariables(BaseVariables):
        other: Optional[str] = Field(default="woof")

    # Add a job configuration and variables for the worker (currently used to create template
    # if not found on the worker pool)
    WorkerTestImpl.job_configuration = WorkerJobConfig
    WorkerTestImpl.job_configuration_variables = WorkerVariables

    pool_name = "test-pool"

    # Create a new worker pool
    response = await client.post(
        "/work_pools/", json=dict(name=pool_name, type="test-type")
    )
    result = pydantic.parse_obj_as(schemas.core.WorkPool, response.json())
    model = await models.workers.read_work_pool(session=session, work_pool_id=result.id)
    assert model.name == pool_name

    # Create a worker with the new pool and sync with the backend
    worker = WorkerTestImpl(
        name="test",
        work_pool_name=pool_name,
    )
    async with get_client() as client:
        worker._client = client
        await worker.sync_with_backend()

    assert (
        worker._work_pool.base_job_template
        == WorkerTestImpl.get_default_base_job_template()
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_poolStartServiceChecksForWork(self):\n reactor = MemoryReactorWithClock()\n cph = SteppablePoolHelper(jobSchema + schemaText)\n then = datetime.datetime(2012, 12, 12, 12, 12, 0)\n reactor.advance(astimestamp(then))\n cph.setUp(self)\n pcp = ControllerQueue(reactor, cph.pool.connection, useWorkerPool=False)\n now = then + datetime.timedelta(seconds=20)\n\n @transactionally(cph.pool.connection)\n def createOldWork(txn):\n one = DummyWorkItem.makeJob(txn, jobID=1, workID=1, a=3, b=4, notBefore=then)\n two = DummyWorkItem.makeJob(txn, jobID=2, workID=2, a=7, b=9, notBefore=now)\n return gatherResults([one, two])\n\n pcp.startService()\n cph.flushHolders()\n reactor.advance(19)\n self.assertEquals(\n DummyWorkItem.results,\n {1: 7}\n )\n reactor.advance(20)\n self.assertEquals(\n DummyWorkItem.results,\n {1: 7, 2: 16}\n )", "async def create_work_pool(\n work_pool: schemas.actions.WorkPoolCreate,\n db: OrionDBInterface = Depends(provide_database_interface),\n) -> schemas.core.WorkPool:\n\n if work_pool.name.lower().startswith(\"prefect\"):\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Worker pools starting with 'Prefect' are reserved for internal use.\",\n )\n\n try:\n async with db.session_context(begin_transaction=True) as session:\n model = await models.workers.create_work_pool(\n session=session, work_pool=work_pool, db=db\n )\n except sa.exc.IntegrityError:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"A worker with this name already exists.\",\n )\n\n return model", "async def test_base_worker_gets_job_configuration_when_syncing_with_backend_with_just_job_config(\n session, client\n):\n\n class WorkerJobConfig(BaseJobConfiguration):\n other: Optional[str] = Field(template=\"{{other}}\")\n\n # Add a job configuration for the worker (currently used to create template\n # if not found on the worker pool)\n WorkerTestImpl.job_configuration = WorkerJobConfig\n\n expected_job_template = {\n \"job_configuration\": {\n \"command\": \"{{ command }}\",\n \"env\": \"{{ env }}\",\n \"labels\": \"{{ labels }}\",\n \"name\": \"{{ name }}\",\n \"other\": \"{{ other }}\",\n },\n \"variables\": {\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"title\": \"Command\",\n \"description\": (\n \"The command to use when starting a flow run. 
\"\n \"In most cases, this should be left blank and the command \"\n \"will be automatically generated by the worker.\"\n ),\n },\n \"env\": {\n \"title\": \"Environment Variables\",\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"},\n \"description\": (\n \"Environment variables to set when starting a flow run.\"\n ),\n },\n \"labels\": {\n \"title\": \"Labels\",\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"},\n \"description\": (\n \"Labels applied to infrastructure created by the worker using \"\n \"this job configuration.\"\n ),\n },\n \"name\": {\n \"type\": \"string\",\n \"title\": \"Name\",\n \"description\": (\n \"Name given to infrastructure created by the worker using this \"\n \"job configuration.\"\n ),\n },\n \"other\": {\"type\": \"string\", \"title\": \"Other\"},\n },\n \"type\": \"object\",\n },\n }\n\n pool_name = \"test-pool\"\n\n # Create a new worker pool\n response = await client.post(\n \"/work_pools/\", json=dict(name=pool_name, type=\"test-type\")\n )\n result = pydantic.parse_obj_as(schemas.core.WorkPool, response.json())\n model = await models.workers.read_work_pool(session=session, work_pool_id=result.id)\n assert model.name == pool_name\n\n # Create a worker with the new pool and sync with the backend\n worker = WorkerTestImpl(\n name=\"test\",\n work_pool_name=pool_name,\n )\n async with get_client() as client:\n worker._client = client\n await worker.sync_with_backend()\n\n assert worker._work_pool.base_job_template == expected_job_template", "def prepare_pr_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def _setupPools(self):\n reactor = MemoryReactorWithClock()\n cph = SteppablePoolHelper(jobSchema + schemaText)\n then = datetime.datetime(2012, 12, 12, 12, 12, 12)\n reactor.advance(astimestamp(then))\n cph.setUp(self)\n qpool = ControllerQueue(reactor, cph.pool.connection, useWorkerPool=False)\n\n realChoosePerformer = qpool.choosePerformer\n performerChosen = []\n\n def catchPerformerChoice(onlyLocally=False):\n result = realChoosePerformer(onlyLocally=onlyLocally)\n performerChosen.append(True)\n return result\n\n qpool.choosePerformer = catchPerformerChoice\n reactor.callLater(0, qpool._workCheck)\n\n qpool.startService()\n cph.flushHolders()\n\n return cph, qpool, reactor, performerChosen", "def 
test_workerConnectionPoolPerformJob(self):\n clock = Clock()\n peerPool = ControllerQueue(clock, None)\n factory = peerPool.workerListenerFactory()\n\n def peer():\n p = factory.buildProtocol(None)\n t = StringTransport()\n p.makeConnection(t)\n return p, t\n\n worker1, _ignore_trans1 = peer()\n worker2, _ignore_trans2 = peer()\n\n # Ask the worker to do something.\n worker1.performJob(JobDescriptor(1, 1, \"ABC\"))\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 0)\n\n # Now ask the pool to do something\n peerPool.workerPool.performJob(JobDescriptor(2, 1, \"ABC\"))\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 1)", "async def create_work_pool(\n self,\n work_pool: WorkPoolCreate,\n ) -> WorkPool:\n try:\n response = await self._client.post(\n \"/work_pools/\",\n json=work_pool.dict(json_compatible=True, exclude_unset=True),\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_409_CONFLICT:\n raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e\n else:\n raise\n\n return pydantic.parse_obj_as(WorkPool, response.json())", "def prepare_pr_optimal_model_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = ''\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def pre_loadbalancer_pool_create(self, resource_dict):\n pass", "def test_poolStartServiceChecksForWork(self):\n reactor = MemoryReactorWithClock()\n cph = SteppablePoolHelper(nodeSchema + schemaText)\n then = datetime.datetime(2012, 12, 12, 12, 12, 0)\n reactor.advance(astimestamp(then))\n cph.setUp(self)\n pcp = PeerConnectionPool(reactor, cph.pool.connection, 4321, schema)\n now = then + datetime.timedelta(seconds=pcp.queueProcessTimeout * 2)\n\n @transactionally(cph.pool.connection)\n def createOldWork(txn):\n one = DummyWorkItem.create(txn, workID=1, a=3, b=4, notBefore=then)\n two = DummyWorkItem.create(txn, workID=2, a=7, b=9, notBefore=now)\n return gatherResults([one, two])\n\n pcp.startService()\n cph.flushHolders()\n reactor.advance(pcp.queueProcessTimeout * 2)\n self.assertEquals(\n cph.rows(\"select * from DUMMY_WORK_DONE\"),\n [(1, 7)]\n )\n cph.rows(\"delete from DUMMY_WORK_DONE\")\n reactor.advance(pcp.queueProcessTimeout * 2)\n self.assertEquals(\n cph.rows(\"select * from DUMMY_WORK_DONE\"),\n [(2, 16)]\n )", "def _get_unified_job_template_class(cls):\n raise NotImplementedError # Implement in subclass.", 
"def work(self, job):\n pass", "def prepare_ss_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index=1, rank='0', extraArgs=''):\n #New: only prepares a single job which allows multiple jobs to be queued\n #We must change the ownership of each of the copasi files to the user running this script\n #\n #We assume that we have write privileges on each of the files through our group, but don't have permission to actually change ownership (must be superuser to do this)\n #Thus, we workaround this by copying the original file, deleting the original, and moving the copy back to the original filename\n \n# import shutil\n# for i in range(len(self.get_optimization_parameters())):\n# for max in (0, 1):\n# copasi_file = os.path.join(self.path, 'auto_copasi_%d.cps' % (2*i + max))\n# temp_file = os.path.join(self.path, 'temp.cps')\n# shutil.copy2(copasi_file, temp_file)\n# os.remove(copasi_file)\n# os.rename(temp_file, copasi_file)\n# os.chmod(copasi_file, 0664) #Set as group readable and writable\n \n ############\n #Build the appropriate .job files for the sensitivity optimization task, write them to disk, and make a note of their locations\n condor_jobs = []\n \n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n \n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles='',\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def test_workerPerformJobNoZeroWeight(self):\n clock = Clock()\n peerPool = ControllerQueue(clock, None)\n factory = peerPool.workerListenerFactory()\n\n def peer():\n p = factory.buildProtocol(None)\n t = StringTransport()\n p.makeConnection(t)\n return p, t\n\n worker1, _ignore_trans1 = peer()\n worker2, _ignore_trans2 = peer()\n\n # Ask the worker to do something.\n worker1.performJob(JobDescriptor(1, 0, \"ABC\"))\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 0)\n\n # Now ask the pool to do something\n peerPool.workerPool.performJob(JobDescriptor(2, 0, \"ABC\"))\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 1)\n\n # Ask the worker to do something more.\n worker1.performJob(JobDescriptor(3, 5, \"ABC\"))\n self.assertEquals(worker1.currentLoad, 6)\n self.assertEquals(worker2.currentLoad, 1)", "def getWorker(self):\n pass", "def background_worker_pool(self):\r\n return self.run_tracker.background_worker_pool()", "def _initJobs(self):\n pass", "def pre_floating_ip_pool_create(self, resource_dict):\n pass", "def create_worker(context=None):\n return BasicWorker(context)", "def test_make_worker_with_threadpool_size(self):\n from twisted.internet import reactor\n\n old_maxthreads = reactor.getThreadPool().max\n self.add_cleanup(reactor.suggestThreadPoolSize, old_maxthreads)\n # Explicitly set the 
threadpool size to something different from the\n # value we're testing with.\n reactor.suggestThreadPoolSize(5)\n\n self.mk_config_file('worker', [\"transport_name: sphex\"])\n maker = VumiWorkerServiceMaker()\n\n # By default, we don't touch the threadpool.\n options = StartWorkerOptions()\n options.parseOptions([\n '--worker-class', 'vumi.demos.words.EchoWorker',\n '--config', self.config_file['worker'],\n ])\n worker = maker.makeService(options)\n self.assertEqual({'transport_name': 'sphex'}, worker.config)\n self.assertEqual(reactor.getThreadPool().max, 5)\n\n # If asked, we set the threadpool's maximum size.\n options_mt = StartWorkerOptions()\n options_mt.parseOptions([\n '--worker-class', 'vumi.demos.words.EchoWorker',\n '--config', self.config_file['worker'],\n '--maxthreads', '2',\n ])\n worker = maker.makeService(options_mt)\n self.assertEqual({'transport_name': 'sphex'}, worker.config)\n self.assertEqual(reactor.getThreadPool().max, 2)", "def prepare_so_condor_job(self, pool_type, pool_address, subtask_index=1, rank='0', extraArgs=''):\n #New: only prepares a single job which allows multiple jobs to be queued\n #We must change the ownership of each of the copasi files to the user running this script\n #\n #We assume that we have write privileges on each of the files through our group, but don't have permission to actually change ownership (must be superuser to do this)\n #Thus, we workaround this by copying the original file, deleting the original, and moving the copy back to the original filename\n \n# import shutil\n# for i in range(len(self.get_optimization_parameters())):\n# for max in (0, 1):\n# copasi_file = os.path.join(self.path, 'auto_copasi_%d.cps' % (2*i + max))\n# temp_file = os.path.join(self.path, 'temp.cps')\n# shutil.copy2(copasi_file, temp_file)\n# os.remove(copasi_file)\n# os.rename(temp_file, copasi_file)\n# os.chmod(copasi_file, 0664) #Set as group readable and writable\n \n ############\n #Build the appropriate .job files for the sensitivity optimization task, write them to disk, and make a note of their locations\n condor_jobs = []\n \n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n n = len(self.get_optimization_parameters()) * 2\n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n \n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles='',\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = n,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def post_loadbalancer_pool_create(self, resource_dict):\n pass", "async def test_create_flow_run_from_deployment_disambiguates_queue_name_from_other_pools(\n self, deployment, client, session\n ):\n # create a bunch of pools with \"default\" named queues\n for idx in range(3):\n await models.workers.create_work_pool(\n session=session,\n work_pool=schemas.actions.WorkPoolCreate(\n name=f\"Bogus Work Pool {idx}\", base_job_template={}\n 
),\n )\n await session.commit()\n\n response = await client.post(\n f\"deployments/{deployment.id}/create_flow_run\",\n json=schemas.actions.DeploymentFlowRunCreate(\n work_queue_name=\"default\"\n ).dict(json_compatible=True),\n )\n assert response.json()[\"work_queue_name\"] == \"default\"\n assert response.json()[\"work_queue_id\"] == str(\n deployment.work_queue.work_pool.default_queue_id\n )", "def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs", "def get_default_pool():\n return 'tank'", "def __init__(self: AutoScalingCluster,\n source: Iterable[str] = None,\n num_tasks: int = 1,\n template: str = DEFAULT_TEMPLATE,\n bundlesize: int = DEFAULT_BUNDLESIZE,\n bundlewait: int = DEFAULT_BUNDLEWAIT,\n bind: Tuple[str, int] = ('0.0.0.0', QueueConfig.port),\n delay_start: float = DEFAULT_DELAY,\n launcher: str = DEFAULT_AUTOSCALE_LAUNCHER,\n launcher_args: List[str] = None,\n remote_exe: str = 'hyper-shell',\n max_retries: int = DEFAULT_ATTEMPTS,\n eager: bool = False,\n redirect_failures: IO = None,\n capture: bool = False,\n policy: str = DEFAULT_AUTOSCALE_POLICY,\n period: int = DEFAULT_AUTOSCALE_PERIOD,\n factor: float = DEFAULT_AUTOSCALE_FACTOR,\n init_size: int = DEFAULT_AUTOSCALE_INIT_SIZE,\n min_size: int = DEFAULT_AUTOSCALE_MIN_SIZE,\n max_size: int = DEFAULT_AUTOSCALE_MAX_SIZE,\n forever_mode: bool = False, # noqa: ignored (passed by ClusterApp)\n restart_mode: bool = False, # noqa: ignored (passed by ClusterApp)\n in_memory: bool = False, # noqa: ignored (passed by ClusterApp)\n no_confirm: bool = False, # noqa: ignored (passed by ClusterApp)\n client_timeout: int = None,\n task_timeout: int = None\n ) -> None:\n auth = secrets.token_hex(64)\n self.server = ServerThread(source=source, auth=auth, bundlesize=bundlesize, bundlewait=bundlewait,\n max_retries=max_retries, eager=eager, address=bind, forever_mode=True,\n redirect_failures=redirect_failures)\n launcher_args = '' if launcher_args is None else ' '.join(launcher_args)\n client_args = '' if not capture else '--capture'\n if client_timeout is not 
None:\n client_args += f' -T {client_timeout}'\n if task_timeout is not None:\n client_args += f' -W {task_timeout}'\n launcher = (f'{launcher} {launcher_args} {remote_exe} client -H {HOSTNAME} -p {bind[1]} '\n f'-N {num_tasks} -b {bundlesize} -w {bundlewait} -t \"{template}\" -k {auth} '\n f'-d {delay_start} {client_args}')\n self.autoscaler = AutoScalerThread(policy=policy, factor=factor, period=period,\n init_size=init_size, min_size=min_size, max_size=max_size,\n launcher=launcher)\n super().__init__(name='hypershell-cluster')", "def _spawn_worker(self, func, *args, **kwargs):\n if self._worker_pool.free():\n return self._worker_pool.spawn(func, *args, **kwargs)\n else:\n raise exception.NoFreeConductorWorker()", "def current_worker_pool():\n try:\n return worker_thread_data.pool\n except AttributeError:\n return None", "def create_job_tick(self, job_tick_data):", "def test_workerConnectionPoolPerformWork(self):\n clock = Clock()\n peerPool = PeerConnectionPool(clock, None, 4322, schema)\n factory = peerPool.workerListenerFactory()\n\n def peer():\n p = factory.buildProtocol(None)\n t = StringTransport()\n p.makeConnection(t)\n return p, t\n\n worker1, _ignore_trans1 = peer()\n worker2, _ignore_trans2 = peer()\n\n # Ask the worker to do something.\n worker1.performWork(schema.DUMMY_WORK_ITEM, 1)\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 0)\n\n # Now ask the pool to do something\n peerPool.workerPool.performWork(schema.DUMMY_WORK_ITEM, 2)\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 1)" ]
[ "0.6296183", "0.62347543", "0.615321", "0.6071176", "0.60353386", "0.60198176", "0.5917459", "0.59044653", "0.5898099", "0.58407146", "0.58316094", "0.5808723", "0.57955056", "0.5765526", "0.5756927", "0.57000154", "0.5697046", "0.5683254", "0.5608928", "0.5606832", "0.5575313", "0.5569678", "0.5526588", "0.5516842", "0.55040956", "0.54935676", "0.5481881", "0.54757553", "0.54748356", "0.5464765" ]
0.6370928
0
Test that the job configuration is correctly built from the template and overrides
async def test_base_job_configuration_from_template_and_overrides(
    template, overrides, expected
):
    config = await BaseJobConfiguration.from_template_and_values(
        base_job_template=template, values=overrides
    )
    assert config.dict() == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_job_configuration_from_template_and_overrides(\n template, overrides, expected\n):\n\n class ArbitraryJobConfiguration(BaseJobConfiguration):\n var1: str = Field(template=\"{{ var1 }}\")\n var2: int = Field(template=\"{{ var2 }}\")\n\n config = await ArbitraryJobConfiguration.from_template_and_values(\n base_job_template=template, values=overrides\n )\n assert config.dict() == expected", "def testJobProperties(databases):\n\n def checkProperties(config):\n jobId = 'job0'\n assert config.targets == {'target1', 'target2'}\n assert config.getId() == jobId\n assert config['name'] == jobId\n assert config.owner == gen.owner\n assert config['owner'] == gen.owner\n assert config.comment == gen.comment\n #assert config.getDescription() == config['description']\n\n gen = DataGenerator(databases)\n config = gen.createConfiguration(\n targets=('target1', 'target2')\n )\n runWithReload(databases, config, checkProperties)", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def test_get_job_config(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.VeryShortDoubleJob\"\n config = {\"test_config\": \"test_config_value\"}\n job = self.client.jobs.create(test_app, class_path,\n ctx=self._get_functional_context(),\n conf=config)\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n job_config = job.get_config()\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual(config[\"test_config\"], job_config[\"test_config\"])", "def test_successful_configuration(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "async def test_base_worker_gets_job_configuration_when_syncing_with_backend_with_just_job_config(\n session, client\n):\n\n class WorkerJobConfig(BaseJobConfiguration):\n other: Optional[str] = Field(template=\"{{other}}\")\n\n # Add a job configuration for the worker (currently used to create template\n # if not found on the worker pool)\n WorkerTestImpl.job_configuration = WorkerJobConfig\n\n expected_job_template = {\n \"job_configuration\": {\n \"command\": \"{{ command }}\",\n \"env\": \"{{ env }}\",\n \"labels\": \"{{ labels }}\",\n \"name\": \"{{ name }}\",\n \"other\": \"{{ other }}\",\n },\n \"variables\": {\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"title\": \"Command\",\n \"description\": (\n \"The command to use when starting a flow run. 
\"\n \"In most cases, this should be left blank and the command \"\n \"will be automatically generated by the worker.\"\n ),\n },\n \"env\": {\n \"title\": \"Environment Variables\",\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"},\n \"description\": (\n \"Environment variables to set when starting a flow run.\"\n ),\n },\n \"labels\": {\n \"title\": \"Labels\",\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"},\n \"description\": (\n \"Labels applied to infrastructure created by the worker using \"\n \"this job configuration.\"\n ),\n },\n \"name\": {\n \"type\": \"string\",\n \"title\": \"Name\",\n \"description\": (\n \"Name given to infrastructure created by the worker using this \"\n \"job configuration.\"\n ),\n },\n \"other\": {\"type\": \"string\", \"title\": \"Other\"},\n },\n \"type\": \"object\",\n },\n }\n\n pool_name = \"test-pool\"\n\n # Create a new worker pool\n response = await client.post(\n \"/work_pools/\", json=dict(name=pool_name, type=\"test-type\")\n )\n result = pydantic.parse_obj_as(schemas.core.WorkPool, response.json())\n model = await models.workers.read_work_pool(session=session, work_pool_id=result.id)\n assert model.name == pool_name\n\n # Create a worker with the new pool and sync with the backend\n worker = WorkerTestImpl(\n name=\"test\",\n work_pool_name=pool_name,\n )\n async with get_client() as client:\n worker._client = client\n await worker.sync_with_backend()\n\n assert worker._work_pool.base_job_template == expected_job_template", "def test_invalid_configuration(self):\n\n config = copy.deepcopy(self.configuration)\n config['version'] = 'BAD'\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : config\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n 
FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config", "def test_create_seed_missing_param(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'my-job-no-setting'\n manifest['job']['interface']['settings'] = []\n config = copy.deepcopy(self.configuration)\n #TODO investigate whether settings in config but not manifest should be removed\n config['settings'] = {}\n\n json_data = {\n 'manifest': {\n 'seedVersion': '1.0.0',\n 'job': {\n 'name': 'my-job'\n }\n }\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_template_task_config(exopy_qtbot, task_workbench):\n plugin = task_workbench.get_plugin('exopy.tasks')\n\n path = os.path.join(os.path.dirname(__file__),\n 'test_template.task.ini')\n root = RootTask()\n config = TemplateTaskConfig(manager=plugin,\n template_path=path,\n future_parent=root)\n assert config.template_doc\n task = config.build_task()\n assert len(task.children) == 1\n\n show_and_close_widget(exopy_qtbot, TemplateConfigView(config=config))", "def _create_job_config(\n self,\n experiment_id: str,\n params: Optional[dict],\n pipeline_package_path: Optional[str],\n pipeline_id: Optional[str],\n version_id: Optional[str],\n enable_caching: Optional[bool],\n ):\n\n class JobConfig:\n\n def __init__(self, spec, resource_references):\n self.spec = spec\n self.resource_references = resource_references\n\n params = params or {}\n pipeline_json_string = None\n if pipeline_package_path:\n pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)\n\n # Caching option set at submission time overrides the compile time settings.\n if enable_caching is not None:\n self._override_caching_options(pipeline_obj, enable_caching)\n\n pipeline_json_string = json.dumps(pipeline_obj)\n api_params = [\n kfp_server_api.V1Parameter(\n name=sanitize_k8s_name(name=k, allow_capital_underscore=True),\n value=str(v) if type(v) not in (list, dict) else json.dumps(v))\n for k, v in params.items()\n 
]\n resource_references = []\n key = kfp_server_api.models.V1ResourceKey(\n id=experiment_id,\n type=kfp_server_api.models.V1ResourceType.EXPERIMENT)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key, relationship=kfp_server_api.models.V1Relationship.OWNER)\n resource_references.append(reference)\n\n if version_id:\n key = kfp_server_api.models.V1ResourceKey(\n id=version_id,\n type=kfp_server_api.models.V1ResourceType.PIPELINE_VERSION)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key,\n relationship=kfp_server_api.models.V1Relationship.CREATOR)\n resource_references.append(reference)\n\n spec = kfp_server_api.models.V1PipelineSpec(\n pipeline_id=pipeline_id,\n workflow_manifest=pipeline_json_string,\n parameters=api_params)\n return JobConfig(spec=spec, resource_references=resource_references)", "def testJobOneTask(databases):\n\n class CustomGenerator(DataGenerator):\n numTasks = 1\n numInputs = [ 0 ]\n numOutputs = [ 0 ]\n\n gen = CustomGenerator(databases)\n gen.createDefinitions()\n config = gen.createConfiguration()\n\n def checkOne(config):\n taskName = gen.tasks[0]\n #assert config.getProduct('') is None\n assert len(config.getInputs()) == 0\n assert len(config.getInputsGrouped()) == 0\n assert len(config.getTasks()) == 1\n task, = config.getTasks()\n assert task is not None\n assert task.getName() == taskName\n assert len(config.getTaskSequence()) == 1\n\n runWithReload(databases, config, checkOne)", "def test_create_seed_missing_setting(self):\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'my-job-no-setting'\n manifest['job']['interface']['settings'] = []\n config = copy.deepcopy(self.configuration)\n #TODO investigate whether settings in config but not manifest should be removed\n config['settings'] = {}\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-no-setting-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': config\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name='my-job-no-setting').first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n self.assertEqual(results['manifest']['job']['interface']['settings'], [])\n self.assertEqual(results['configuration']['settings'], {})", "def test_successful_configuration(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "async def test_base_worker_gets_job_configuration_when_syncing_with_backend_with_job_config_and_variables(\n session, client\n):\n\n class WorkerJobConfig(BaseJobConfiguration):\n other: Optional[str] = Field(template=\"{{ other }}\")\n\n class WorkerVariables(BaseVariables):\n other: Optional[str] = Field(default=\"woof\")\n\n # Add a job configuration and variables for the worker (currently used to create template\n # if not found on the worker 
pool)\n WorkerTestImpl.job_configuration = WorkerJobConfig\n WorkerTestImpl.job_configuration_variables = WorkerVariables\n\n pool_name = \"test-pool\"\n\n # Create a new worker pool\n response = await client.post(\n \"/work_pools/\", json=dict(name=pool_name, type=\"test-type\")\n )\n result = pydantic.parse_obj_as(schemas.core.WorkPool, response.json())\n model = await models.workers.read_work_pool(session=session, work_pool_id=result.id)\n assert model.name == pool_name\n\n # Create a worker with the new pool and sync with the backend\n worker = WorkerTestImpl(\n name=\"test\",\n work_pool_name=pool_name,\n )\n async with get_client() as client:\n worker._client = client\n await worker.sync_with_backend()\n\n assert (\n worker._work_pool.base_job_template\n == WorkerTestImpl.get_default_base_job_template()\n )", "def test_edit_configuration(self):\n configuration = copy.deepcopy(self.configuration)\n configuration['settings'] = {'DB_HOST': 'other_scale_db'}\n configuration['mounts'] = {\n 'dted': {\n 'type': 'host',\n 'host_path': '/some/new/path'\n }\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n json_data = {\n 'configuration': configuration,\n 'auto_update': False\n }\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def testJobEmpty(databases):\n\n def checkEmpty(config):\n assert config.getParameter('') == None\n assert len(config.getInputs()) == 0\n assert len(config.getInputsGrouped()) == 0\n assert len(config.getTasks()) == 0\n assert len(config.getTaskSequence()) == 0\n\n config = DataGenerator(databases).createConfiguration()\n runWithReload(databases, config, checkEmpty)", "def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n 
output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)", "def get_test_config(cls, cluster, role, env, job, filler=''):\n return cls.CONFIG_BASE % {'job': job, 'role': role, 'env': env, 'cluster': cluster,\n 'inner': filler}", "def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = 
job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config", "def getConfig(self):\n\n\n\n config = self.testInit.getConfiguration()\n\n config.component_(\"Agent\")\n config.Agent.WMSpecDirectory = self.testDir\n config.Agent.agentName = 'testAgent'\n config.Agent.componentName = 'test'\n\n\n # First the general stuff\n config.section_(\"General\")\n config.General.workDir = os.getenv(\"TESTDIR\", self.testDir)\n\n # Now the CoreDatabase information\n # This should be the dialect, dburl, etc\n\n config.section_(\"CoreDatabase\")\n config.CoreDatabase.connectUrl = os.getenv(\"DATABASE\")\n config.CoreDatabase.socket = os.getenv(\"DBSOCK\")\n\n\n\n # JobCreator\n config.component_(\"JobCreator\")\n config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'\n config.JobCreator.logLevel = 'DEBUG'\n config.JobCreator.maxThreads = 1\n config.JobCreator.UpdateFromResourceControl = True\n config.JobCreator.pollInterval = 10\n config.JobCreator.jobCacheDir = self.testDir\n config.JobCreator.defaultJobType = 'processing' #Type of jobs that we run, used for resource control\n config.JobCreator.workerThreads = 2\n config.JobCreator.componentDir = os.path.join(os.getcwd(), 'Components')\n\n\n\n # JobSubmitter\n config.component_(\"JobSubmitter\")\n config.JobSubmitter.namespace = 'WMComponent.JobSubmitter.JobSubmitter'\n config.JobSubmitter.logLevel = 'INFO'\n config.JobSubmitter.maxThreads = 1\n config.JobSubmitter.pollInterval = 10\n config.JobSubmitter.pluginName = 'CondorGlobusPlugin'\n config.JobSubmitter.pluginDir = 'JobSubmitter.Plugins'\n config.JobSubmitter.submitDir = os.path.join(self.testDir, 'submit')\n config.JobSubmitter.submitNode = os.getenv(\"HOSTNAME\", 'badtest.fnal.gov')\n config.JobSubmitter.submitScript = os.path.join(getWMBASE(),\n 'test/python/WMComponent_t/JobSubmitter_t',\n 'submit.sh')\n config.JobSubmitter.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobSubmitter.workerThreads = 2\n config.JobSubmitter.jobsPerWorker = 200\n\n\n\n\n # JobTracker\n config.component_(\"JobTracker\")\n config.JobTracker.logLevel = 'DEBUG'\n config.JobTracker.pollInterval = 10\n config.JobTracker.trackerName = 'CondorTracker'\n config.JobTracker.pluginDir = 'WMComponent.JobTracker.Plugins'\n config.JobTracker.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobTracker.runTimeLimit = 7776000 #Jobs expire after 90 days\n config.JobTracker.idleTimeLimit = 7776000\n config.JobTracker.heldTimeLimit = 7776000\n config.JobTracker.unknTimeLimit = 7776000\n\n\n\n # JobAccountant\n config.component_(\"JobAccountant\")\n config.JobAccountant.pollInterval = 60\n config.JobAccountant.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobAccountant.logLevel = 'INFO'\n\n\n\n # JobArchiver\n config.component_(\"JobArchiver\")\n config.JobArchiver.pollInterval = 60\n config.JobArchiver.logLevel = 'INFO'\n config.JobArchiver.logDir = os.path.join(self.testDir, 'logs')\n config.JobArchiver.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobArchiver.numberOfJobsToCluster = 1000\n\n\n\n # Task Archiver\n config.component_(\"TaskArchiver\")\n config.TaskArchiver.componentDir = self.testInit.generateWorkDir()\n config.TaskArchiver.WorkQueueParams = {}\n config.TaskArchiver.pollInterval = 60\n config.TaskArchiver.logLevel = 'INFO'\n config.TaskArchiver.timeOut = 0\n\n\n\n # JobStateMachine\n 
config.component_('JobStateMachine')\n config.JobStateMachine.couchurl = os.getenv('COUCHURL',\n 'mnorman:[email protected]:5984')\n config.JobStateMachine.couchDBName = \"mnorman_test\"\n\n\n # Needed, because this is a test\n os.makedirs(config.JobSubmitter.submitDir)\n\n\n return config", "def test_create_seed_validation(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'validation'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'docker_image': 'my-new-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def testJobCombinedProduct(databases):\n\n class CustomGenerator(DataGenerator):\n pass\n gen = CustomGenerator(databases)\n\n image = gen.createProduct('image', False, True)\n buildFw = gen.createFramework('build', [], [ image ])\n testFw = gen.createFramework('test', [ image ], [])\n buildTask1 = gen.createTask('build1', buildFw)\n buildTask2 = gen.createTask('build2', buildFw)\n testTask = gen.createTask('test', testFw)\n\n buildTR = gen.createTaskRunner(name='tr_build', capabilities=['build'])\n testTR = gen.createTaskRunner(name='tr_test', capabilities=['test'])\n\n def simulate(config):\n sanityCheck(gen, config)\n\n job, = config.createJobs(gen.owner)\n # TODO: The toXML functionality should probably be tested\n # in a separate test case.\n\n # Verify execution:\n # Successfully complete first build task.\n task = job.assignTask(databases.resourceDB[buildTR])\n assert task is not None\n assert task.getName().startswith('build')\n taskDone(job, task.getName())\n # Try to start test task (should fail).\n task = job.assignTask(databases.resourceDB[testTR])\n assert task is None\n # Complete second build task, but make it fail.\n task = job.assignTask(databases.resourceDB[buildTR])\n assert task is not None\n assert task.getName().startswith('build')\n taskDone(job, task.getName(), ResultCode.ERROR)\n # Try to start test task (should succeed).\n task = job.assignTask(databases.resourceDB[testTR])\n assert task is not None\n assert task.getName() == testTask\n # Successfully complete test task.\n taskDone(job, testTask)\n assert job.isExecutionFinished()\n assert job.hasFinalResult()\n # Check that locators have been stored separately.\n producers = set()\n for taskId, locator in job.getProduct(image).getProducers():\n assert taskId.startswith('build')\n assert locator == locatorForTask(taskId)\n\n runWithReload(databases, gen.createConfiguration(), simulate)", "def test_build_from_template(temp_with_override, cli):\n # Create the book from the template\n book = temp_with_override / \"new_book\"\n _ = cli.invoke(commands.create, book.as_posix())\n build_result = cli.invoke(\n commands.build, [book.as_posix(), \"-n\", \"-W\", \"--keep-going\"]\n )\n assert build_result.exit_code == 0, build_result.output\n html = book.joinpath(\"_build\", \"html\")\n assert html.joinpath(\"index.html\").exists()\n assert html.joinpath(\"intro.html\").exists()", "def create_job_ymls(job_template_args, job_template_dir, job_template_name, output_dir, upload=True):\n for i, template_args in enumerate(job_template_args):\n template_loader = jinja2.FileSystemLoader(searchpath=job_template_dir)\n template_env = jinja2.Environment(loader=template_loader)\n template_file = job_template_name\n 
template = template_env.get_template(template_file)\n output_text = template.render(template_args)\n job_yml = 'vista-job-{}.yml'.format(i)\n job_yml_path = join(output_dir, job_yml)\n \n if not exists(output_dir):\n mkdir(output_dir)\n with open(job_yml_path, 'w') as output_template:\n output_template.write(output_text)\n if upload:\n upload_blob(UPLOAD_BUCKET, job_yml_path, job_yml)\n print(job_yml, 'uploaded')", "def run(params):\n jobs_config_file = os.path.join(CONFIG_PATH, 'jobs.yaml')\n\n jenkins_config_file = os.path.join(RESOURCE_PATH, 'jobs', 'config')\n\n jobs_path = os.path.join(RESOURCE_PATH, 'jobs')\n\n jobs = list(JobGenerator(jobs_config_file).jobs())\n\n if params.jobs:\n jobs = [job for job in jobs if fnmatch.fnmatch(job.name, params.jobs)]\n yaml_obj = [job.get_object() for job in jobs]\n if params.config:\n yaml_file = open(params.config, 'w')\n yaml_path = params.config\n else:\n yaml_file = tempfile.NamedTemporaryFile(\n prefix='libvirt_ci-jobs-', suffix='.yaml',\n dir=jobs_path, delete=False)\n yaml_path = yaml_file.name\n try:\n yaml.dump(yaml_obj, stream=yaml_file, indent=4,\n default_flow_style=False)\n yaml_file.close()\n\n if params.only_config:\n return\n\n cmd = \"jenkins-jobs\"\n cmd += \" --conf %s\" % jenkins_config_file\n if params.test:\n cmd += \" test\"\n else:\n cmd += \" update\"\n\n cmd += \" -r %s\" % jobs_path\n if params.jobs:\n cmd += \" %s\" % params.jobs\n # Ignore standard output of jenkins-job-builder\n cmd += \" > /dev/null\"\n\n utils.run(cmd, debug=True, ignore_fail=False, timeout=3600)\n finally:\n if params.only_config:\n LOGGER.info('Keep job file %s', yaml_path)\n else:\n try:\n LOGGER.info('Removing job file %s', yaml_path)\n os.remove(yaml_path)\n except (OSError, IOError) as details:\n LOGGER.warning('Failed to remove job file %s: %s',\n yaml_file.name, details)", "def test_init(self):\n self.assertEqual(self.job.InputArgs, {'ParamFile': 'test.txt', \n 'SubmitToTestDB': 'True', \n 'UserId': '1', 'Mapping': '5', \n 'ProcessOnly': 'True', \n 'StudyID': '2', 'SFF': '10', \n 'SeqPlatform': 'FLX'})\n self.assertEqual(self.job.OracleJobName, 'jobname')", "def test_custom_config(cli, build_resources):\n books, _ = build_resources\n config = books.joinpath(\"config\")\n result = cli.invoke(commands.build, [config.as_posix(), \"-n\", \"-W\", \"--keep-going\"])\n assert result.exit_code == 0, result.output\n html = config.joinpath(\"_build\", \"html\", \"index.html\").read_text(encoding=\"utf8\")\n soup = BeautifulSoup(html, \"html.parser\")\n assert '<p class=\"title logo__title\">TEST PROJECT NAME</p>' in html\n assert '<div class=\"sphinx-tabs docutils container\">' in html\n assert '<link rel=\"stylesheet\" type=\"text/css\" href=\"_static/mycss.css\" />' in html\n assert '<script src=\"_static/js/myjs.js\"></script>' in html\n\n # Check that our comments engines were correctly added\n assert soup.find(\"script\", attrs={\"kind\": \"hypothesis\"})\n assert soup.find(\"script\", attrs={\"kind\": \"utterances\"})", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 
'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n self.assertNotIn('DB_PASS', result['configuration']['settings'])", "def write_config_for_job(job_script, filename, include_defaults, fail_on_missing):\n j = Job()\n j.script = job_script\n j._load_script()\n component_names = [c.__class__.__name__ for c in j.components]\n write_config('job.cfg', component_names, include_defaults, fail_on_missing)" ]
[ "0.80357426", "0.72256106", "0.69125086", "0.6896827", "0.6699468", "0.663811", "0.64339805", "0.6272028", "0.6243319", "0.6226876", "0.6217676", "0.6210075", "0.618887", "0.617924", "0.61525685", "0.61100477", "0.6062935", "0.6060679", "0.6009658", "0.5990625", "0.59586096", "0.5952746", "0.5942789", "0.59395313", "0.59332186", "0.592684", "0.59116757", "0.58845943", "0.5883366", "0.5863076" ]
0.8151706
0
Test that the job configuration is correctly built from the template and overrides
async def test_job_configuration_from_template_and_overrides(
    template, overrides, expected
):
    class ArbitraryJobConfiguration(BaseJobConfiguration):
        var1: str = Field(template="{{ var1 }}")
        var2: int = Field(template="{{ var2 }}")

    config = await ArbitraryJobConfiguration.from_template_and_values(
        base_job_template=template, values=overrides
    )
    assert config.dict() == expected
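For orientation, a minimal sketch of the kind of template/overrides pair such a test consumes, assuming the Prefect-style base job template layout that also appears in the negatives below (a "job_configuration" section of placeholders plus a "variables" JSON schema). The field names, defaults, and values here are illustrative only and are not taken from the dataset record:

    # Illustrative only: a base job template whose job_configuration section
    # references template variables, and the overrides a caller might supply.
    template = {
        "job_configuration": {"var1": "{{ var1 }}", "var2": "{{ var2 }}"},
        "variables": {
            "type": "object",
            "properties": {
                "var1": {"type": "string", "default": "hello"},
                "var2": {"type": "integer", "default": 42},
            },
        },
    }
    overrides = {"var2": 100}
    # from_template_and_values would be expected to resolve var1 from its
    # schema default and let the caller's override win for var2, so the
    # resolved config.dict() would carry var1 == "hello" and var2 == 100
    # alongside the inherited base fields (command, env, labels, name).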
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_base_job_configuration_from_template_and_overrides(\n template, overrides, expected\n):\n config = await BaseJobConfiguration.from_template_and_values(\n base_job_template=template, values=overrides\n )\n assert config.dict() == expected", "def testJobProperties(databases):\n\n def checkProperties(config):\n jobId = 'job0'\n assert config.targets == {'target1', 'target2'}\n assert config.getId() == jobId\n assert config['name'] == jobId\n assert config.owner == gen.owner\n assert config['owner'] == gen.owner\n assert config.comment == gen.comment\n #assert config.getDescription() == config['description']\n\n gen = DataGenerator(databases)\n config = gen.createConfiguration(\n targets=('target1', 'target2')\n )\n runWithReload(databases, config, checkProperties)", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def test_get_job_config(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.VeryShortDoubleJob\"\n config = {\"test_config\": \"test_config_value\"}\n job = self.client.jobs.create(test_app, class_path,\n ctx=self._get_functional_context(),\n conf=config)\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n job_config = job.get_config()\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual(config[\"test_config\"], job_config[\"test_config\"])", "def test_successful_configuration(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "async def test_base_worker_gets_job_configuration_when_syncing_with_backend_with_just_job_config(\n session, client\n):\n\n class WorkerJobConfig(BaseJobConfiguration):\n other: Optional[str] = Field(template=\"{{other}}\")\n\n # Add a job configuration for the worker (currently used to create template\n # if not found on the worker pool)\n WorkerTestImpl.job_configuration = WorkerJobConfig\n\n expected_job_template = {\n \"job_configuration\": {\n \"command\": \"{{ command }}\",\n \"env\": \"{{ env }}\",\n \"labels\": \"{{ labels }}\",\n \"name\": \"{{ name }}\",\n \"other\": \"{{ other }}\",\n },\n \"variables\": {\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"title\": \"Command\",\n \"description\": (\n \"The command to use when starting a flow run. 
\"\n \"In most cases, this should be left blank and the command \"\n \"will be automatically generated by the worker.\"\n ),\n },\n \"env\": {\n \"title\": \"Environment Variables\",\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"},\n \"description\": (\n \"Environment variables to set when starting a flow run.\"\n ),\n },\n \"labels\": {\n \"title\": \"Labels\",\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"},\n \"description\": (\n \"Labels applied to infrastructure created by the worker using \"\n \"this job configuration.\"\n ),\n },\n \"name\": {\n \"type\": \"string\",\n \"title\": \"Name\",\n \"description\": (\n \"Name given to infrastructure created by the worker using this \"\n \"job configuration.\"\n ),\n },\n \"other\": {\"type\": \"string\", \"title\": \"Other\"},\n },\n \"type\": \"object\",\n },\n }\n\n pool_name = \"test-pool\"\n\n # Create a new worker pool\n response = await client.post(\n \"/work_pools/\", json=dict(name=pool_name, type=\"test-type\")\n )\n result = pydantic.parse_obj_as(schemas.core.WorkPool, response.json())\n model = await models.workers.read_work_pool(session=session, work_pool_id=result.id)\n assert model.name == pool_name\n\n # Create a worker with the new pool and sync with the backend\n worker = WorkerTestImpl(\n name=\"test\",\n work_pool_name=pool_name,\n )\n async with get_client() as client:\n worker._client = client\n await worker.sync_with_backend()\n\n assert worker._work_pool.base_job_template == expected_job_template", "def test_invalid_configuration(self):\n\n config = copy.deepcopy(self.configuration)\n config['version'] = 'BAD'\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : config\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n 
FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config", "def test_create_seed_missing_param(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'my-job-no-setting'\n manifest['job']['interface']['settings'] = []\n config = copy.deepcopy(self.configuration)\n #TODO investigate whether settings in config but not manifest should be removed\n config['settings'] = {}\n\n json_data = {\n 'manifest': {\n 'seedVersion': '1.0.0',\n 'job': {\n 'name': 'my-job'\n }\n }\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_template_task_config(exopy_qtbot, task_workbench):\n plugin = task_workbench.get_plugin('exopy.tasks')\n\n path = os.path.join(os.path.dirname(__file__),\n 'test_template.task.ini')\n root = RootTask()\n config = TemplateTaskConfig(manager=plugin,\n template_path=path,\n future_parent=root)\n assert config.template_doc\n task = config.build_task()\n assert len(task.children) == 1\n\n show_and_close_widget(exopy_qtbot, TemplateConfigView(config=config))", "def _create_job_config(\n self,\n experiment_id: str,\n params: Optional[dict],\n pipeline_package_path: Optional[str],\n pipeline_id: Optional[str],\n version_id: Optional[str],\n enable_caching: Optional[bool],\n ):\n\n class JobConfig:\n\n def __init__(self, spec, resource_references):\n self.spec = spec\n self.resource_references = resource_references\n\n params = params or {}\n pipeline_json_string = None\n if pipeline_package_path:\n pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)\n\n # Caching option set at submission time overrides the compile time settings.\n if enable_caching is not None:\n self._override_caching_options(pipeline_obj, enable_caching)\n\n pipeline_json_string = json.dumps(pipeline_obj)\n api_params = [\n kfp_server_api.V1Parameter(\n name=sanitize_k8s_name(name=k, allow_capital_underscore=True),\n value=str(v) if type(v) not in (list, dict) else json.dumps(v))\n for k, v in params.items()\n 
]\n resource_references = []\n key = kfp_server_api.models.V1ResourceKey(\n id=experiment_id,\n type=kfp_server_api.models.V1ResourceType.EXPERIMENT)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key, relationship=kfp_server_api.models.V1Relationship.OWNER)\n resource_references.append(reference)\n\n if version_id:\n key = kfp_server_api.models.V1ResourceKey(\n id=version_id,\n type=kfp_server_api.models.V1ResourceType.PIPELINE_VERSION)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key,\n relationship=kfp_server_api.models.V1Relationship.CREATOR)\n resource_references.append(reference)\n\n spec = kfp_server_api.models.V1PipelineSpec(\n pipeline_id=pipeline_id,\n workflow_manifest=pipeline_json_string,\n parameters=api_params)\n return JobConfig(spec=spec, resource_references=resource_references)", "def testJobOneTask(databases):\n\n class CustomGenerator(DataGenerator):\n numTasks = 1\n numInputs = [ 0 ]\n numOutputs = [ 0 ]\n\n gen = CustomGenerator(databases)\n gen.createDefinitions()\n config = gen.createConfiguration()\n\n def checkOne(config):\n taskName = gen.tasks[0]\n #assert config.getProduct('') is None\n assert len(config.getInputs()) == 0\n assert len(config.getInputsGrouped()) == 0\n assert len(config.getTasks()) == 1\n task, = config.getTasks()\n assert task is not None\n assert task.getName() == taskName\n assert len(config.getTaskSequence()) == 1\n\n runWithReload(databases, config, checkOne)", "def test_create_seed_missing_setting(self):\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'my-job-no-setting'\n manifest['job']['interface']['settings'] = []\n config = copy.deepcopy(self.configuration)\n #TODO investigate whether settings in config but not manifest should be removed\n config['settings'] = {}\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-no-setting-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': config\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name='my-job-no-setting').first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n self.assertEqual(results['manifest']['job']['interface']['settings'], [])\n self.assertEqual(results['configuration']['settings'], {})", "def test_successful_configuration(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "async def test_base_worker_gets_job_configuration_when_syncing_with_backend_with_job_config_and_variables(\n session, client\n):\n\n class WorkerJobConfig(BaseJobConfiguration):\n other: Optional[str] = Field(template=\"{{ other }}\")\n\n class WorkerVariables(BaseVariables):\n other: Optional[str] = Field(default=\"woof\")\n\n # Add a job configuration and variables for the worker (currently used to create template\n # if not found on the worker 
pool)\n WorkerTestImpl.job_configuration = WorkerJobConfig\n WorkerTestImpl.job_configuration_variables = WorkerVariables\n\n pool_name = \"test-pool\"\n\n # Create a new worker pool\n response = await client.post(\n \"/work_pools/\", json=dict(name=pool_name, type=\"test-type\")\n )\n result = pydantic.parse_obj_as(schemas.core.WorkPool, response.json())\n model = await models.workers.read_work_pool(session=session, work_pool_id=result.id)\n assert model.name == pool_name\n\n # Create a worker with the new pool and sync with the backend\n worker = WorkerTestImpl(\n name=\"test\",\n work_pool_name=pool_name,\n )\n async with get_client() as client:\n worker._client = client\n await worker.sync_with_backend()\n\n assert (\n worker._work_pool.base_job_template\n == WorkerTestImpl.get_default_base_job_template()\n )", "def test_edit_configuration(self):\n configuration = copy.deepcopy(self.configuration)\n configuration['settings'] = {'DB_HOST': 'other_scale_db'}\n configuration['mounts'] = {\n 'dted': {\n 'type': 'host',\n 'host_path': '/some/new/path'\n }\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n json_data = {\n 'configuration': configuration,\n 'auto_update': False\n }\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def testJobEmpty(databases):\n\n def checkEmpty(config):\n assert config.getParameter('') == None\n assert len(config.getInputs()) == 0\n assert len(config.getInputsGrouped()) == 0\n assert len(config.getTasks()) == 0\n assert len(config.getTaskSequence()) == 0\n\n config = DataGenerator(databases).createConfiguration()\n runWithReload(databases, config, checkEmpty)", "def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n 
output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)", "def get_test_config(cls, cluster, role, env, job, filler=''):\n return cls.CONFIG_BASE % {'job': job, 'role': role, 'env': env, 'cluster': cluster,\n 'inner': filler}", "def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = 
job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config", "def getConfig(self):\n\n\n\n config = self.testInit.getConfiguration()\n\n config.component_(\"Agent\")\n config.Agent.WMSpecDirectory = self.testDir\n config.Agent.agentName = 'testAgent'\n config.Agent.componentName = 'test'\n\n\n # First the general stuff\n config.section_(\"General\")\n config.General.workDir = os.getenv(\"TESTDIR\", self.testDir)\n\n # Now the CoreDatabase information\n # This should be the dialect, dburl, etc\n\n config.section_(\"CoreDatabase\")\n config.CoreDatabase.connectUrl = os.getenv(\"DATABASE\")\n config.CoreDatabase.socket = os.getenv(\"DBSOCK\")\n\n\n\n # JobCreator\n config.component_(\"JobCreator\")\n config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'\n config.JobCreator.logLevel = 'DEBUG'\n config.JobCreator.maxThreads = 1\n config.JobCreator.UpdateFromResourceControl = True\n config.JobCreator.pollInterval = 10\n config.JobCreator.jobCacheDir = self.testDir\n config.JobCreator.defaultJobType = 'processing' #Type of jobs that we run, used for resource control\n config.JobCreator.workerThreads = 2\n config.JobCreator.componentDir = os.path.join(os.getcwd(), 'Components')\n\n\n\n # JobSubmitter\n config.component_(\"JobSubmitter\")\n config.JobSubmitter.namespace = 'WMComponent.JobSubmitter.JobSubmitter'\n config.JobSubmitter.logLevel = 'INFO'\n config.JobSubmitter.maxThreads = 1\n config.JobSubmitter.pollInterval = 10\n config.JobSubmitter.pluginName = 'CondorGlobusPlugin'\n config.JobSubmitter.pluginDir = 'JobSubmitter.Plugins'\n config.JobSubmitter.submitDir = os.path.join(self.testDir, 'submit')\n config.JobSubmitter.submitNode = os.getenv(\"HOSTNAME\", 'badtest.fnal.gov')\n config.JobSubmitter.submitScript = os.path.join(getWMBASE(),\n 'test/python/WMComponent_t/JobSubmitter_t',\n 'submit.sh')\n config.JobSubmitter.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobSubmitter.workerThreads = 2\n config.JobSubmitter.jobsPerWorker = 200\n\n\n\n\n # JobTracker\n config.component_(\"JobTracker\")\n config.JobTracker.logLevel = 'DEBUG'\n config.JobTracker.pollInterval = 10\n config.JobTracker.trackerName = 'CondorTracker'\n config.JobTracker.pluginDir = 'WMComponent.JobTracker.Plugins'\n config.JobTracker.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobTracker.runTimeLimit = 7776000 #Jobs expire after 90 days\n config.JobTracker.idleTimeLimit = 7776000\n config.JobTracker.heldTimeLimit = 7776000\n config.JobTracker.unknTimeLimit = 7776000\n\n\n\n # JobAccountant\n config.component_(\"JobAccountant\")\n config.JobAccountant.pollInterval = 60\n config.JobAccountant.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobAccountant.logLevel = 'INFO'\n\n\n\n # JobArchiver\n config.component_(\"JobArchiver\")\n config.JobArchiver.pollInterval = 60\n config.JobArchiver.logLevel = 'INFO'\n config.JobArchiver.logDir = os.path.join(self.testDir, 'logs')\n config.JobArchiver.componentDir = os.path.join(os.getcwd(), 'Components')\n config.JobArchiver.numberOfJobsToCluster = 1000\n\n\n\n # Task Archiver\n config.component_(\"TaskArchiver\")\n config.TaskArchiver.componentDir = self.testInit.generateWorkDir()\n config.TaskArchiver.WorkQueueParams = {}\n config.TaskArchiver.pollInterval = 60\n config.TaskArchiver.logLevel = 'INFO'\n config.TaskArchiver.timeOut = 0\n\n\n\n # JobStateMachine\n 
config.component_('JobStateMachine')\n config.JobStateMachine.couchurl = os.getenv('COUCHURL',\n 'mnorman:[email protected]:5984')\n config.JobStateMachine.couchDBName = \"mnorman_test\"\n\n\n # Needed, because this is a test\n os.makedirs(config.JobSubmitter.submitDir)\n\n\n return config", "def test_create_seed_validation(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'validation'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'docker_image': 'my-new-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_build_from_template(temp_with_override, cli):\n # Create the book from the template\n book = temp_with_override / \"new_book\"\n _ = cli.invoke(commands.create, book.as_posix())\n build_result = cli.invoke(\n commands.build, [book.as_posix(), \"-n\", \"-W\", \"--keep-going\"]\n )\n assert build_result.exit_code == 0, build_result.output\n html = book.joinpath(\"_build\", \"html\")\n assert html.joinpath(\"index.html\").exists()\n assert html.joinpath(\"intro.html\").exists()", "def testJobCombinedProduct(databases):\n\n class CustomGenerator(DataGenerator):\n pass\n gen = CustomGenerator(databases)\n\n image = gen.createProduct('image', False, True)\n buildFw = gen.createFramework('build', [], [ image ])\n testFw = gen.createFramework('test', [ image ], [])\n buildTask1 = gen.createTask('build1', buildFw)\n buildTask2 = gen.createTask('build2', buildFw)\n testTask = gen.createTask('test', testFw)\n\n buildTR = gen.createTaskRunner(name='tr_build', capabilities=['build'])\n testTR = gen.createTaskRunner(name='tr_test', capabilities=['test'])\n\n def simulate(config):\n sanityCheck(gen, config)\n\n job, = config.createJobs(gen.owner)\n # TODO: The toXML functionality should probably be tested\n # in a separate test case.\n\n # Verify execution:\n # Successfully complete first build task.\n task = job.assignTask(databases.resourceDB[buildTR])\n assert task is not None\n assert task.getName().startswith('build')\n taskDone(job, task.getName())\n # Try to start test task (should fail).\n task = job.assignTask(databases.resourceDB[testTR])\n assert task is None\n # Complete second build task, but make it fail.\n task = job.assignTask(databases.resourceDB[buildTR])\n assert task is not None\n assert task.getName().startswith('build')\n taskDone(job, task.getName(), ResultCode.ERROR)\n # Try to start test task (should succeed).\n task = job.assignTask(databases.resourceDB[testTR])\n assert task is not None\n assert task.getName() == testTask\n # Successfully complete test task.\n taskDone(job, testTask)\n assert job.isExecutionFinished()\n assert job.hasFinalResult()\n # Check that locators have been stored separately.\n producers = set()\n for taskId, locator in job.getProduct(image).getProducers():\n assert taskId.startswith('build')\n assert locator == locatorForTask(taskId)\n\n runWithReload(databases, gen.createConfiguration(), simulate)", "def create_job_ymls(job_template_args, job_template_dir, job_template_name, output_dir, upload=True):\n for i, template_args in enumerate(job_template_args):\n template_loader = jinja2.FileSystemLoader(searchpath=job_template_dir)\n template_env = jinja2.Environment(loader=template_loader)\n template_file = job_template_name\n 
template = template_env.get_template(template_file)\n output_text = template.render(template_args)\n job_yml = 'vista-job-{}.yml'.format(i)\n job_yml_path = join(output_dir, job_yml)\n \n if not exists(output_dir):\n mkdir(output_dir)\n with open(job_yml_path, 'w') as output_template:\n output_template.write(output_text)\n if upload:\n upload_blob(UPLOAD_BUCKET, job_yml_path, job_yml)\n print(job_yml, 'uploaded')", "def run(params):\n jobs_config_file = os.path.join(CONFIG_PATH, 'jobs.yaml')\n\n jenkins_config_file = os.path.join(RESOURCE_PATH, 'jobs', 'config')\n\n jobs_path = os.path.join(RESOURCE_PATH, 'jobs')\n\n jobs = list(JobGenerator(jobs_config_file).jobs())\n\n if params.jobs:\n jobs = [job for job in jobs if fnmatch.fnmatch(job.name, params.jobs)]\n yaml_obj = [job.get_object() for job in jobs]\n if params.config:\n yaml_file = open(params.config, 'w')\n yaml_path = params.config\n else:\n yaml_file = tempfile.NamedTemporaryFile(\n prefix='libvirt_ci-jobs-', suffix='.yaml',\n dir=jobs_path, delete=False)\n yaml_path = yaml_file.name\n try:\n yaml.dump(yaml_obj, stream=yaml_file, indent=4,\n default_flow_style=False)\n yaml_file.close()\n\n if params.only_config:\n return\n\n cmd = \"jenkins-jobs\"\n cmd += \" --conf %s\" % jenkins_config_file\n if params.test:\n cmd += \" test\"\n else:\n cmd += \" update\"\n\n cmd += \" -r %s\" % jobs_path\n if params.jobs:\n cmd += \" %s\" % params.jobs\n # Ignore standard output of jenkins-job-builder\n cmd += \" > /dev/null\"\n\n utils.run(cmd, debug=True, ignore_fail=False, timeout=3600)\n finally:\n if params.only_config:\n LOGGER.info('Keep job file %s', yaml_path)\n else:\n try:\n LOGGER.info('Removing job file %s', yaml_path)\n os.remove(yaml_path)\n except (OSError, IOError) as details:\n LOGGER.warning('Failed to remove job file %s: %s',\n yaml_file.name, details)", "def test_init(self):\n self.assertEqual(self.job.InputArgs, {'ParamFile': 'test.txt', \n 'SubmitToTestDB': 'True', \n 'UserId': '1', 'Mapping': '5', \n 'ProcessOnly': 'True', \n 'StudyID': '2', 'SFF': '10', \n 'SeqPlatform': 'FLX'})\n self.assertEqual(self.job.OracleJobName, 'jobname')", "def test_custom_config(cli, build_resources):\n books, _ = build_resources\n config = books.joinpath(\"config\")\n result = cli.invoke(commands.build, [config.as_posix(), \"-n\", \"-W\", \"--keep-going\"])\n assert result.exit_code == 0, result.output\n html = config.joinpath(\"_build\", \"html\", \"index.html\").read_text(encoding=\"utf8\")\n soup = BeautifulSoup(html, \"html.parser\")\n assert '<p class=\"title logo__title\">TEST PROJECT NAME</p>' in html\n assert '<div class=\"sphinx-tabs docutils container\">' in html\n assert '<link rel=\"stylesheet\" type=\"text/css\" href=\"_static/mycss.css\" />' in html\n assert '<script src=\"_static/js/myjs.js\"></script>' in html\n\n # Check that our comments engines were correctly added\n assert soup.find(\"script\", attrs={\"kind\": \"hypothesis\"})\n assert soup.find(\"script\", attrs={\"kind\": \"utterances\"})", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 
'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n self.assertNotIn('DB_PASS', result['configuration']['settings'])", "def write_config_for_job(job_script, filename, include_defaults, fail_on_missing):\n j = Job()\n j.script = job_script\n j._load_script()\n component_names = [c.__class__.__name__ for c in j.components]\n write_config('job.cfg', component_names, include_defaults, fail_on_missing)" ]
[ "0.8150813", "0.7224147", "0.69114107", "0.68947124", "0.6698226", "0.6636703", "0.6432527", "0.6272015", "0.6242813", "0.6228083", "0.6216085", "0.6208851", "0.6188286", "0.6177688", "0.6151176", "0.6108306", "0.60626", "0.6059457", "0.60078806", "0.5989153", "0.59566575", "0.59530866", "0.59427214", "0.5942439", "0.5934513", "0.59255564", "0.5911237", "0.58851564", "0.58828646", "0.5860716" ]
0.8035178
1
Write the position and vector field in XSF format.
def write_xsf(imode, atoms, vector, scale=1.0):
    vector = np.asarray(vector, dtype=float) * scale
    assert vector.shape == atoms.positions.shape

    pos_vec = np.hstack((atoms.positions, vector))
    nions = pos_vec.shape[0]
    chem_symbs = atoms.get_chemical_symbols()

    with open('mode_{:04d}.xsf'.format(imode), 'w') as out:
        line = "CRYSTAL\n"
        line += "PRIMVEC\n"
        line += '\n'.join([
            ' '.join(['%21.16f' % a for a in vec])
            for vec in atoms.cell
        ])
        line += "\nPRIMCOORD\n"
        line += "{:3d} {:d}\n".format(nions, 1)
        line += '\n'.join([
            '{:3s}'.format(chem_symbs[ii]) +
            ' '.join(['%21.16f' % a for a in pos_vec[ii]])
            for ii in range(nions)
        ])
        out.write(line)
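The document above reads atoms.positions, atoms.cell, and atoms.get_chemical_symbols(), i.e. an ASE-style Atoms object, plus a per-atom displacement array of the same shape as the positions. A minimal usage sketch, assuming ASE and NumPy are installed and using arbitrary illustrative values for the structure and the mode vector:

    import numpy as np
    from ase.build import bulk

    atoms = bulk("Si", "diamond", a=5.43)   # two-atom silicon cell
    mode = np.zeros_like(atoms.positions)   # one displacement vector per atom
    mode[0, 2] = 0.1                        # push the first atom along z
    write_xsf(0, atoms, mode, scale=5.0)    # produces mode_0000.xsf

Note that write_xsf itself also relies on numpy being imported as np in its own module.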
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeXYZPos(self,phys,xyzname):\r\n XYZWriter.XYZWriter(xyzname).write(phys.posvec, phys.myTop.atoms, phys.myTop.atomTypes)", "def output(self):\n to_write = 'X '\n to_write += str(self.def_field['name'])+' '\n to_write += str(self.def_field['pin_number'])+' '\n to_write += str(self.def_field['x'])+' '\n to_write += str(self.def_field['y'])+' '\n to_write += str(self.def_field['length'])+' '\n to_write += self.def_field['direction']+' '\n to_write += str(self.def_field['size_num'])+' '\n to_write += str(self.def_field['size_name'])+' '\n #to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['type'])+' '\n to_write += self.def_field['shape']\n to_write += '\\n'\n return to_write", "def savepos(self):\n self.out.write(self.csi + \"s\")", "def output(self):\n to_write = 'S '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x1'])+' '\n to_write += str(self.offset[1] + self.def_field['y1'])+' '\n to_write += str(self.offset[0] + self.def_field['x2'])+' '\n to_write += str(self.offset[1] + self.def_field['y2'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "def write_xyz(self, filename):\n df = self.contents[['Element', 'X', 'Y', 'Z']].copy()\n np.savetxt(filename, df.values, fmt='%s' + '%20.15f' * 3,\n header=f\"{self.numatom}\\n{self.comment}\", comments=\"\")", "def writePDBPos(self,phys,pdbname):\r\n PDBWriter.PDBWriter(pdbname).write(phys.posvec, phys.myPDB)", "def print_vector(self):\n print self.x, self.y, self.z", "def save_to_xyz(self, filename): \n with open( filename, 'a' ) as F:\n F = open( filename, 'a' )\n F.write( '%d\\n'%self.num_atoms )\n F.write( \"XYZ\\n\" )\n for num,row in enumerate(self.atoms):\n try:\n F.write('%s '%self.species[num])\n except:\n F.write('X%d '%num)\n F.write( mat2str( row, \"%16.10f\" ) )\n F.write( \"\\n\" )", "def write_vectors(self, filename):\n svu.write_realvectors(self,filename)", "def write_psf(self):\n # **********************************\n # **********************************\n # psf writer (start)\n # **********************************\n # **********************************\n\n print(\"******************************\")\n print(\"\")\n print(\n \"The charmm X-plor format psf writer (the write_psf 
function) is running\"\n )\n\n date_time = datetime.datetime.today()\n\n print(\n \"write_psf: forcefield_selection = {}, residues = {}\".format(\n self.forcefield_selection, self.residues\n )\n )\n\n print(\"******************************\")\n print(\"\")\n\n if self.structure_box_1:\n list_of_structures = [\n self.structure_box_0_ff,\n self.structure_box_1_ff,\n ]\n list_of_file_names = [self.filename_box_0, self.filename_box_1]\n stuct_only = [self.structure_box_0_ff, self.structure_box_1_ff]\n else:\n list_of_structures = [self.structure_box_0_ff]\n list_of_file_names = [self.filename_box_0]\n stuct_only = [self.structure_box_0_ff]\n\n for q in range(0, len(list_of_structures)):\n stuct_iteration = list_of_structures[q]\n file_name_iteration = list_of_file_names[q]\n output = str(file_name_iteration) + \".psf\"\n stuct_only_iteration = stuct_only[q]\n # Lammps syntax depends on the functional form\n # Infer functional form based on the properties of the stuct_iteration\n if self.detect_forcefield_style:\n # Check for angles\n if len(stuct_iteration.urey_bradleys) > 0:\n print(\n \"Warning: Urey bradley terms detected. GOMC does no support the Urey-Bradley terms\"\n )\n warn(\n \"warning: Urey bradley terms detected. \"\n \"GOMC does no support the Urey-Bradley terms\"\n )\n use_urey_bradleys = True\n else:\n print(\"No urey bradley terms detected\")\n use_urey_bradleys = False\n\n # Check for dihedrals\n if len(stuct_iteration.rb_torsions) > 0:\n print(\n \"RB Torsions detected, will converted to CHARMM Dihedrals\"\n )\n use_rb_torsions = True\n dihedrals_list = stuct_iteration.rb_torsions\n dihedrals = [\n [\n dihedral.atom1.idx + 1,\n dihedral.atom2.idx + 1,\n dihedral.atom3.idx + 1,\n dihedral.atom4.idx + 1,\n ]\n for dihedral in stuct_iteration.rb_torsions\n ]\n else:\n use_rb_torsions = False\n\n if len(stuct_iteration.dihedrals) > 0:\n print(\n \"Charmm dihedrals detected, so CHARMM Dihedrals will remain\"\n )\n use_dihedrals = True\n dihedrals_list = stuct_iteration.dihedrals\n dihedrals = [\n [\n dihedral.atom1.idx + 1,\n dihedral.atom2.idx + 1,\n dihedral.atom3.idx + 1,\n dihedral.atom4.idx + 1,\n ]\n for dihedral in stuct_iteration.dihedrals\n ]\n else:\n use_dihedrals = False\n if (use_rb_torsions is False) and (use_dihedrals is False):\n dihedrals_list = []\n dihedrals = []\n if use_rb_torsions and use_dihedrals:\n warn(\n \"Multiple dihedral styles detected, check your \"\n \"Forcefield XML and structure files\"\n )\n\n # Check for impropers\n for dihedral in stuct_iteration.dihedrals:\n if dihedral.improper:\n warn(\n \"ERROR: Amber-style impropers are currently not supported in GOMC\"\n )\n\n impropers_list = stuct_iteration.impropers\n impropers = [\n [\n improper.atom1.idx + 1,\n improper.atom2.idx + 1,\n improper.atom3.idx + 1,\n improper.atom4.idx + 1,\n ]\n for improper in stuct_iteration.impropers\n ]\n\n no_atoms = len(stuct_iteration.atoms)\n no_bonds = len(stuct_iteration.bonds)\n no_angles = len(stuct_iteration.angles)\n\n no_dihedrals = len(dihedrals)\n no_impropers = len(impropers)\n\n no_donors = len(stuct_iteration.donors)\n no_acceptors = len(stuct_iteration.acceptors)\n no_groups = len(stuct_iteration.groups)\n\n # psf printing (start)\n\n residue_data_list = []\n residue_names_list = []\n for k, atom in enumerate(stuct_only_iteration.atoms):\n residue_data_list.append(str(atom.residue))\n residue_names_list.append(atom.residue.name)\n\n unique_residue_data_dict = {}\n unique_residue_data_list = []\n residue_data_name_list = []\n\n for m, residue in 
enumerate(stuct_only_iteration.residues):\n unique_residue_data_list.append(\n str(stuct_only_iteration.residues[m])\n )\n unique_residue_data_dict.update(\n {unique_residue_data_list[m]: m + 1}\n )\n residue_data_name_list.append(\n stuct_only_iteration.residues[m].name\n )\n\n res_no_chain_iter_corrected = []\n residue_id_list = []\n residue_id_adder_fixed_struct_wo_bonds = 0\n for f, PSF_atom_iteration_0 in enumerate(\n stuct_only_iteration.atoms\n ):\n if f > 0:\n if (\n PSF_atom_iteration_0.residue.chain\n == previous_residue_chain\n and len(PSF_atom_iteration_0.bonds) == 0\n ):\n residue_id_adder_fixed_struct_wo_bonds += 1\n\n previous_residue_chain = PSF_atom_iteration_0.residue.chain\n\n residue_id_int = int(\n unique_residue_data_dict[residue_data_list[f]]\n + residue_id_adder_fixed_struct_wo_bonds\n )\n res_id_adder = int(\n (residue_id_int % self.max_residue_no) % self.max_residue_no\n )\n if int(res_id_adder) == 0:\n res_no_iteration_corrected = int(self.max_residue_no)\n else:\n res_no_iteration_corrected = res_id_adder\n\n res_no_chain_iter_corrected.append(res_no_iteration_corrected)\n residue_id_list.append(residue_id_int)\n\n output_write = genopen(output, \"w\")\n\n first_indent = \"%8s\"\n psf_formating = (\n \"%8s %-4s %-4s %-4s %-4s %4s %10.6f %13.4f\" + 11 * \" \"\n )\n\n output_write.write(\"PSF \")\n output_write.write(\"\\n\\n\")\n\n no_of_remarks = 3\n output_write.write(first_indent % no_of_remarks + \" !NTITLE\\n\")\n output_write.write(\n \" REMARKS this file \"\n + file_name_iteration\n + \" - created by MoSDeF-GOMC using the\"\n + \"\\n\"\n )\n output_write.write(\n \" REMARKS parameters from the \"\n + str(self.forcefield_selection)\n + \" force field via MoSDef\\n\"\n )\n output_write.write(\n \" REMARKS created on \" + str(date_time) + \"\\n\\n\\n\"\n )\n\n # This converts the atom name in the GOMC psf and pdb files to unique atom names\n print(\n \"bead_to_atom_name_dict = {}\".format(\n self.bead_to_atom_name_dict\n )\n )\n [\n unique_individual_atom_names_dict,\n individual_atom_names_list,\n missing_bead_to_atom_name,\n ] = unique_atom_naming(\n stuct_only_iteration,\n residue_id_list,\n residue_names_list,\n bead_to_atom_name_dict=self.bead_to_atom_name_dict,\n )\n\n if None in [\n unique_individual_atom_names_dict,\n individual_atom_names_list,\n missing_bead_to_atom_name,\n ]:\n self.input_error = True\n print_error_message = (\n \"ERROR: The unique_atom_naming function failed while \"\n \"running the charmm_writer function. 
Ensure the proper inputs are \"\n \"in the bead_to_atom_name_dict.\"\n )\n raise ValueError(print_error_message)\n\n # ATOMS: Calculate the atom data\n # psf_formating is conducted for the for CHARMM format (i.e., atom types are base 52, letters only)\n output_write.write(first_indent % no_atoms + \" !NATOM\\n\")\n for i_atom, PSF_atom_iteration_1 in enumerate(\n stuct_iteration.atoms\n ):\n segment_id = PSF_atom_iteration_1.residue.segid or \"SYS\"\n atom_type_iter = base10_to_base52_alph(\n self.atom_types_to_index_value_dict[\n PSF_atom_iteration_1.type\n + \"_\"\n + PSF_atom_iteration_1.residue.name\n ]\n )\n\n atom_lines_iteration = psf_formating % (\n i_atom + 1,\n segment_id,\n res_no_chain_iter_corrected[i_atom],\n str(residue_names_list[i_atom])[: self.max_resname_char],\n individual_atom_names_list[i_atom],\n atom_type_iter,\n PSF_atom_iteration_1.charge,\n PSF_atom_iteration_1.mass,\n )\n\n output_write.write(\"%s\\n\" % atom_lines_iteration)\n\n output_write.write(\"\\n\")\n\n # BONDS: Calculate the bonding data\n output_write.write(first_indent % no_bonds + \" !NBOND: bonds\\n\")\n for i_bond, PSF_bond_iteration_1 in enumerate(\n stuct_iteration.bonds\n ):\n output_write.write(\n (first_indent * 2)\n % (\n PSF_bond_iteration_1.atom1.idx + 1,\n PSF_bond_iteration_1.atom2.idx + 1,\n )\n )\n\n if (i_bond + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_bonds % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_bonds == 0:\n output_write.write(\"\\n\")\n\n # ANGLES: Calculate the angle data\n output_write.write(first_indent % no_angles + \" !NTHETA: angles\\n\")\n for i_angle, angle_iteration in enumerate(stuct_iteration.angles):\n output_write.write(\n (first_indent * 3)\n % (\n angle_iteration.atom1.idx + 1,\n angle_iteration.atom2.idx + 1,\n angle_iteration.atom3.idx + 1,\n )\n )\n\n if (i_angle + 1) % 3 == 0:\n output_write.write(\"\\n\")\n\n if no_angles % 3 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_angles == 0:\n output_write.write(\"\\n\")\n\n # DIHEDRALS: Calculate the dihedral data\n output_write.write(\n first_indent % no_dihedrals + \" !NPHI: dihedrals\\n\"\n )\n for i_dihedral, dihedral_iter in enumerate(dihedrals_list):\n (\n dihedral_atom_1,\n dihedral_atom_2,\n dihedral_atom_3,\n dihedral_atom_4,\n ) = (\n dihedral_iter.atom1,\n dihedral_iter.atom2,\n dihedral_iter.atom3,\n dihedral_iter.atom4,\n )\n\n output_write.write(\n (first_indent * 4)\n % (\n dihedral_atom_1.idx + 1,\n dihedral_atom_2.idx + 1,\n dihedral_atom_3.idx + 1,\n dihedral_atom_4.idx + 1,\n )\n )\n\n if (i_dihedral + 1) % 2 == 0:\n output_write.write(\"\\n\")\n\n if no_dihedrals % 2 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_dihedrals == 0:\n output_write.write(\"\\n\")\n\n # IMPROPERS: Calculate the improper data\n output_write.write(\n first_indent % no_impropers + \" !NIMPHI: impropers\\n\"\n )\n for i_improper, improper_iter in enumerate(impropers_list):\n (\n improper_atom_1,\n improper_atom_2,\n improper_atom_3,\n improper_atom_4,\n ) = (\n improper_iter.atom1,\n improper_iter.atom2,\n improper_iter.atom3,\n improper_iter.atom4,\n )\n\n output_write.write(\n (first_indent * 4)\n % (\n improper_atom_1.idx + 1,\n improper_atom_2.idx + 1,\n improper_atom_3.idx + 1,\n improper_atom_4.idx + 1,\n )\n )\n\n if (i_improper + 1) % 2 == 0:\n output_write.write(\"\\n\")\n\n if no_impropers % 2 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n 
if no_impropers == 0:\n output_write.write(\"\\n\")\n\n # DONOR: calculate the donor data\n output_write.write(first_indent % no_donors + \" !NDON: donors\\n\")\n for donor_i, donor_iter in enumerate(stuct_iteration.donors):\n output_write.write(\n (first_indent * 2)\n % (donor_iter.atom1.idx + 1, donor_iter.atom2.idx + 1)\n )\n if (donor_i + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_donors % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_donors == 0:\n output_write.write(\"\\n\")\n\n # ACCEPTOR: calculate the acceptor data\n output_write.write(\n first_indent % no_acceptors + \" !NACC: acceptors\\n\"\n )\n for acceptor_i, acceptor_iter in enumerate(\n stuct_iteration.acceptors\n ):\n output_write.write(\n (first_indent * 2)\n % (acceptor_iter.atom1.idx + 1, acceptor_iter.atom2.idx + 1)\n )\n if (acceptor_i + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_acceptors % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_acceptors == 0:\n output_write.write(\"\\n\")\n\n # NNB: calculate the NNB data\n output_write.write(first_indent % 0 + \" !NNB\\n\\n\")\n for nbb_i, atoms_iter in enumerate(stuct_iteration.atoms):\n output_write.write(first_indent % 0)\n if (nbb_i + 1) % 8 == 0:\n output_write.write(\"\\n\")\n\n if no_atoms % 8 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_atoms == 0:\n output_write.write(\"\\n\")\n\n # GROUP: calculate the group data\n try:\n group_data = stuct_iteration.groups.nst2\n except AttributeError:\n group_data = 0\n output_write.write(\n (first_indent * 2) % (no_groups or 1, group_data) + \" !NGRP \\n\"\n )\n if stuct_iteration.groups is True:\n for group_i, group_iter in enumerate(stuct_iteration.groups):\n output_write.write(\n (first_indent * 3)\n % (\n group_iter.atom.idx,\n group_iter.type,\n group_iter.move,\n )\n )\n if (group_i + 1) % 3 == 0:\n output_write.write(\"\\n\")\n\n if no_groups % 3 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_groups == 0:\n output_write.write(\"\\n\")\n\n else:\n structure_abs_charge_value = abs(\n sum(\n atom_charge_iter.charge\n for atom_charge_iter in stuct_iteration.atoms\n )\n )\n if structure_abs_charge_value < 1.0e-4:\n group_type = 1\n else:\n group_type = 2\n output_write.write((first_indent * 3) % (0, group_type, 0))\n output_write.write(\"\\n\")\n\n output_write.write(\"\\n\")\n output_write.close()\n # **********************************\n # **********************************\n # psf writer (end)\n # **********************************\n # **********************************", "def _write_particle_information(\n xml_file, structure, xyz, forcefield, ref_distance, ref_mass, ref_energy\n):\n xml_file.write('<position units=\"sigma\" num=\"{}\">\\n'.format(xyz.shape[0]))\n for pos in xyz:\n xml_file.write(\"{}\\t{}\\t{}\\n\".format(*pos / ref_distance))\n xml_file.write(\"</position>\\n\")\n if forcefield:\n types = [atom.type for atom in structure.atoms]\n else:\n types = [atom.name for atom in structure.atoms]\n\n xml_file.write(\"<type>\\n\")\n for atom_type in types:\n xml_file.write(\"{}\\n\".format(atom_type))\n xml_file.write(\"</type>\\n\")\n\n masses = [atom.mass for atom in structure.atoms]\n xml_file.write(\"<mass>\\n\")\n for mass in masses:\n if mass == 0:\n mass = 1.0\n xml_file.write(\"{}\\n\".format(mass / ref_mass))\n xml_file.write(\"</mass>\\n\")\n\n charges = [atom.charge for atom in structure.atoms]\n 
xml_file.write(\"<charge>\\n\")\n e0 = 2.396452e-04 # e^2 mol/(kcal A), permittivity of free space\n charge_factor = (4.0 * np.pi * e0 * ref_distance * ref_energy) ** 0.5\n for charge in charges:\n xml_file.write(\"{}\\n\".format(charge / charge_factor))\n xml_file.write(\"</charge>\\n\")\n if forcefield:\n pair_coeffs = list(\n set(\n (atom.type, atom.epsilon, atom.sigma)\n for atom in structure.atoms\n )\n )\n pair_coeffs.sort(key=lambda pair_type: pair_type[0])\n xml_file.write(\"<pair_coeffs>\\n\")\n for param_set in pair_coeffs:\n xml_file.write(\n \"{}\\t{:.4f}\\t{:.4f}\\n\".format(\n param_set[0],\n param_set[1] / ref_energy,\n param_set[2] / ref_distance,\n )\n )\n xml_file.write(\"</pair_coeffs>\\n\")", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def WriteFile( self ):\n with open( \"BasisVector.in\" , \"w\" ) as outfile:\n firstLine = \" \" + str( self.NQ ) + \\\n \" \" + str( self.Nbranches ) + \\\n \" \" + str( self.NatomsUC ) + \\\n \" \" + str( self.dim ) + \"\\n\"\n outfile.write( firstLine )\n for qq in range( self.NQ ): ## loop over Q vectors\n lineQ = [ \"{:15.8f}\".format( x ) for x in \n self.QVectors[ qq , : ] ]\n lineQ = \"\".join( lineQ )\n outfile.write( lineQ + \"\\n\" )\n for branch in range( self.Nbranches ): ## loop over branches\n for atom in range( self.NatomsUC ): ## loop over atoms in unit cell\n line = [ \"{:15.8f}\".format( x ) for x in \n self.EigenVectors[ qq , branch , atom , : ] ]\n line = \"\".join( line )\n outfile.write( line + \"\\n\" )\n outfile.write( \"\\n\" )\n outfile.write( \"\\n\" )", "def vec_x(self):\t\r\n if self.ox != 0:\r\n ov = self.ox\r\n lv = self.self.lx + self.ox\r\n else:\r\n ov = self.dx / 2\r\n lv = self.lx\r\n\r\n xv = \"\"\r\n for num in np.arange(ov, lv, self.dx):\r\n xv += str(num) + \" \"\r\n\r\n return xv", "def write(self, fileW):\n fileW.wFloat(self.x)\n fileW.wFloat(self.y)\n fileW.wFloat(self.z)", "def write_pos(sposcar,ngrid,nspecies,filename):\n pos=np.dot(sposcar[\"lattvec\"],sposcar[\"positions\"])\n ntot=ngrid[0]*ngrid[1]*ngrid[2]*nspecies\n np_icell=np.empty((3,ntot),dtype=np.intc)\n car=pos\n np_ispecies=np.empty(ntot,dtype=np.intc)\n icell=np_icell\n ispecies=np_ispecies\n\n f=StringIO.StringIO()\n\n for ii in xrange(ntot):\n tmp,ispecies[ii]=divmod(ii,nspecies)\n tmp,icell[0,ii]=divmod(tmp,ngrid[0])\n icell[2,ii],icell[1,ii]=divmod(tmp,ngrid[1])\n car[0,ii],car[1,ii],car[2,ii]=np.dot(sposcar[\"lattvec\"],sposcar[\"positions\"][:,ii])*10\n f.write(\"{:>6d} {:>6d} {:>15.10f} {:>15.10f} {:>15.10f}\\n\".\n format(ii+1,ispecies[ii]+1, car[0,ii],car[1,ii],car[2,ii]))\n ffinal=open(filename,\"w\")\n ffinal.write(f.getvalue())\n f.close()\n ffinal.close()", "def write_vecs(self, vecs_fname):\r\n header = f'{self.vectors.shape[0]} {self.vectors.shape[1]}'\r\n np.savetxt(vecs_fname, np.hstack([self.words.reshape(-1, 1), self.vectors]), fmt='%s', header=header)", "def _write_dx(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n if FN.endswith('.dx'):\n F = open(FN, 'w')\n else:\n import gzip\n F = gzip.open(FN, 'w')\n\n F.write(\"\"\"object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}\norigin {1[0]} {1[1]} {1[2]}\ndelta {2[0]} 0.0 0.0\ndelta 0.0 {2[1]} 0.0\ndelta 0.0 0.0 {2[2]}\nobject 2 class gridconnections counts {0[0]} 
{0[1]} {0[2]}\nobject 3 class array type double rank 0 items {3} data follows\n\"\"\".format(data['counts'], data['origin'], data['spacing'], n_points))\n\n for start_n in range(0, len(data['vals']), 3):\n F.write(' '.join(['%6e' % c\n for c in data['vals'][start_n:start_n + 3]]) + '\\n')\n\n F.write('object 4 class field\\n')\n F.write('component \"positions\" value 1\\n')\n F.write('component \"connections\" value 2\\n')\n F.write('component \"data\" value 3\\n')\n F.close()", "def write_node_shp(self,shpname,extra_fields=[]):\n assert len(extra_fields)==0 # not yet supported!\n\n # zero-based index of node (why does write_edge_shp create 1-based ids?)\n base_dtype = [('node_id',np.int32)]\n\n node_geoms=[geometry.Point( self.nodes['x'][i] )\n for i in self.valid_node_iter() ]\n\n node_data=self.nodes[~self.nodes['deleted']].copy()\n\n # don't need to write all of the original fields out:\n node_data=utils.recarray_del_fields(node_data,['x','deleted'])\n\n wkb2shp.wkb2shp(shpname,input_wkbs=node_geoms,fields=node_data,\n overwrite=True)", "def _writeVTKOutput(self):\n\n sigma = numpy.ones((self.numStations, 3), dtype=numpy.float64)\n sigma[:, 0] *= self.sigmaEast\n sigma[:, 1] *= self.sigmaNorth\n sigma[:, 2] *= self.sigmaUp\n\n vtkHead = \"# vtk DataFile Version 2.0\\n\" + \\\n \"Synthetic GPS stations\\n\" + \\\n \"ASCII\\n\" + \\\n \"DATASET POLYDATA\\n\" + \\\n \"POINTS \" + repr(self.numStations) + \" double\\n\"\n\n v = open(self.vtkOutputFile, 'w')\n v.write(vtkHead)\n numpy.savetxt(v, self.coords)\n\n numConnect = 2 * self.numStations\n connectHead = \"VERTICES %d %d\\n\" % (self.numStations, numConnect)\n v.write(connectHead)\n verts = numpy.arange(self.numStations, dtype=numpy.int64)\n sizes = numpy.ones_like(verts)\n outConnect = numpy.column_stack((sizes, verts))\n numpy.savetxt(v, outConnect, fmt=\"%d\")\n \n dispHead = \"POINT_DATA \" + repr(self.numStations) + \"\\n\" + \\\n \"VECTORS displacement double\\n\"\n v.write(dispHead)\n numpy.savetxt(v, self.dispNoise)\n\n sigHead = \"VECTORS uncertainty double\\n\"\n v.write(sigHead)\n numpy.savetxt(v, sigma)\n v.close()\n \n return", "def to_xyz(self, extended_xyz: bool = True,\n print_stds: bool = False,\n print_forces : bool = False,\n print_max_stds: bool = False,\n write_file: str = '')->str:\n species_list = [Z_to_element(x) for x in self.coded_species]\n xyz_str = ''\n xyz_str += f'{len(self.coded_species)} \\n'\n\n # Add header line with info about lattice and properties if extended\n # xyz option is called.\n if extended_xyz:\n cell = self.cell\n\n xyz_str += f'Lattice=\"{cell[0,0]} {cell[0,1]} {cell[0,2]}'\n xyz_str += f' {cell[1,0]} {cell[1,1]} {cell[1,2]}'\n xyz_str += f' {cell[2,0]} {cell[2,1]} {cell[2,2]}\"'\n xyz_str += f' Proprties=\"species:S:1:pos:R:3'\n\n if print_stds:\n xyz_str += ':stds:R:3'\n stds = self.stds\n if print_forces:\n xyz_str += ':forces:R:3'\n forces = self.forces\n if print_max_stds:\n xyz_str += ':max_std:R:1'\n xyz_str += '\\n'\n else:\n xyz_str += '\\n'\n\n for i, pos in enumerate(self.positions):\n # Write positions\n xyz_str += f\"{species_list[i]} {pos[0]} {pos[1]} {pos[2]}\"\n\n # If extended XYZ: Add in extra information\n if print_stds and extended_xyz:\n xyz_str += f\" {stds[i,0]} {stds[i,1]} {stds[i,2]}\"\n if print_forces and extended_xyz:\n xyz_str += f\" {forces[i,0]} {forces[i,1]} {forces[i,2]}\"\n if print_max_stds and extended_xyz:\n xyz_str += f\" {np.max(stds[i,:])} \"\n xyz_str += '\\n'\n\n # Write to file, optionally\n if write_file:\n with open(write_file, 'w') as f:\n 
f.write(xyz_str)\n\n return xyz_str", "def write_vec(f, vec, name, vec_type):\n f.write('%s %s[%d] = {\\n' % (vec_type, name, len(vec)))\n\n # Write vector elements\n for i in range(len(vec)):\n if vec_type == 'c_float':\n f.write('(c_float)%.20f,\\n' % vec[i])\n else:\n f.write('%i,\\n' % vec[i])\n\n f.write('};\\n')", "def writePosFilesStep(self): \n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getUntilted())\n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getTilted())", "def get_xyz(self)->str:\n xyz_str = str(len(self.POSITION)) + \"\\n\"\n xyz_str += \"# \"+str(self.TITLE.content[0])\n xyz_str += \"# exported wit PyGromosTools\\n\"\n xyz_format = \"{:<3}\\t{:> 3.9f} {:> 3.9f} {:> 3.9f}\\n\"\n\n for position in self.POSITION:\n xyz_line = xyz_format.format(position.atomType[0], position.xp * 10, position.yp * 10, position.zp * 10)\n xyz_str += xyz_line\n\n return xyz_str", "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def PrintOutput(self):\n self.file_settings[\"file_name\"].SetString(self.file_name)\n file = TimeBasedAsciiFileWriterUtility(self.model_part, self.file_settings, self._GetHeader()).file\n for point, var_values in zip(self.found_positions, self.values):\n file.write(self._DataToString(point, var_values))\n file.close()", "def vec_node(self):\r\n\r\n xv = np.arange(self.ox, self.lx + self.ox + self.dx, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy + self.dy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz + self.dz, self.dz)\r\n\r\n return xv, yv, zv", "def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += 
str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def write_xyz(filename, atoms, coordinates, frame='no default set'):\n\n if len(atoms) != len(coordinates):\n raise ValueError('Number of atoms is different than number of positions')\n\n xyz_file = open(filename,'a')\n xyz_file.write('{}\\n'.format(len(atoms)))\n xyz_file.write('frame {}\\n'.format(frame))\n for i in range(len(atoms)):\n xyz_file.write('{}\\t{}\\t{}\\t{}\\n'.format(atoms[i], coordinates[i][0],\n coordinates[i][1], coordinates[i][2]))\n xyz_file.close()" ]
[ "0.6510679", "0.62536174", "0.6112638", "0.5977228", "0.59565", "0.59240866", "0.59199125", "0.58994395", "0.58610994", "0.5860792", "0.5841133", "0.58250743", "0.57749456", "0.5760919", "0.57023495", "0.56638163", "0.56223685", "0.56162715", "0.55931556", "0.55753946", "0.55427563", "0.5509896", "0.54817075", "0.54799837", "0.54617655", "0.54047984", "0.5386403", "0.53836226", "0.5380529", "0.53744215" ]
0.64434624
1
Retrieve all restaurants in the file as a RestaurantCatalog
def get_all(self) -> RestaurantCatalog: catalog = RestaurantCatalog() catalog.add_many(list(self.__data.values())) return catalog
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_restaurant(file):\r\n name_to_rating = {}\r\n price_to_names = {'$':[], '4$':[],'$$$':[],'$$':[]}\r\n cuisine_to_names = {}", "def restaurants_all() -> str:\n restaurant_objects = restaurants.load_restaurants()\n return jsonify(restaurant_objects)", "def get_all_restaurants():\n return list(Restaurant.objects.all().values())", "def get_ratings(filename):\n\n all_ratings = {}\n\n with open(filename) as f:\n for line in f:\n line = line.rstrip()\n restaurant, rating = line.split(\":\")\n all_ratings[restaurant] = rating\n\n return all_ratings", "def test_get_restaurants(self):\n address = {'number': '375',\n 'street': 'Noe St',\n 'city': 'San Francisco',\n 'zip': '94114'}\n\n with self.app.app_context():\n restaurants = ordrin.get_restaurants(address)\n\n # Ordr.in returns a test entry as the first item in the list when\n # when hitting their testing servers.\n entry = restaurants[0]\n self.assertEquals(entry['na'], 'Test Merchant 20130315')\n self.assertEquals(entry['id'], 23917)", "def read_csv(filename):\n restaurants = []\n with open(filename, newline='') as f:\n reader = csv.reader(f)\n for row in reader:\n restaurants.append(row)\n restaurants.pop(0)\n return restaurants", "def recommend(file, price, cuisines_list):\r\n\r\n #read the file.Build data structures\r\n name_to_rating, price_to_names, cuisine_to_names = read_restaurant(file)\r\n\r\n\r\n #look for price\r\n #price: look up the list of restaurant names for the price\r\n\r\n #Now we have a list of restaurants in the right price range\r\n #Need a new list of restaurants that serve one of the cuisines\r\n\r\n\r\n #Need to look at ratings and sort this list\r\n\r\n\r\n #Return the sorted list\r", "def __read_temp_data(self) -> dict:\n\n out = dict()\n\n with open(self.FILE_NAME) as json_file:\n data = json.load(json_file)\n \n for entry in data:\n restaurant = Restaurant.from_json(entry)\n out[restaurant.name_seq_nr] = restaurant\n\n return out", "def read_existing_reservations():\n reservations = []\n list_of_lines = open(\"reservations.csv\").readlines()\n for each in list_of_lines:\n reservations.append(parse_one_record(each.strip()))\n return reservations", "def return_restaurant_rating_dictionary(filename):\n\n the_file = open(filename)\n\n for line in the_file:\n line = line.rstrip()\n ratings = line.split(\":\")\n\n restaurant_name = ratings[0]\n rating = ratings[1]\n restaurant_ratings[restaurant_name] = rating\n\n return restaurant_ratings", "def read_all():\n # Get the list of movies from our DB\n movies = Movie.query.order_by(Movie.movie_id).all()\n\n # Serialize for the response\n movie_schema = MovieSchema(many=True)\n return movie_schema.dump(movies)", "def get_restaurants(term, lat=\"37.788744\", lon=\"-122.411587\", radius=\"805\"):\n\n # Create OAuth2 token and store in session (we don't need to get a new one\n # for every API request)\n\n access_token = get_access_token()\n\n if not SEEDING:\n if \"access_token\" not in session:\n session[\"access_token\"] = access_token\n\n base_url = \"https://api.yelp.com/v3/businesses/search\"\n\n # Create a Unix timestamp for current day at 1:00 PM\n year = datetime.now().year\n day = datetime.now().day\n month = datetime.now().month\n open_time = datetime(year, month, day, 13, 0, 0)\n\n unix_time = time.mktime(open_time.timetuple())\n unix_time_trunc = int(unix_time)\n\n # Set parameters for our request to the business search API.\n parameters = {\n \"latitude\": lat,\n \"longitude\": lon,\n \"radius\": radius,\n \"term\": term,\n \"categories\": 
\"restaurants\",\n \"limit\": 24,\n \"price\": \"1,2,3\",\n \"sort_by\": \"distance\",\n \"open_at\": unix_time_trunc,\n }\n\n # FIXME: Store resulting JSON data in database...\n\n # Fetch all restaurants that fit these parameters and capture the response.\n response = requests.get(url=base_url,\n params=parameters,\n headers={\n 'Authorization': 'Bearer {token}'.format(\n token=access_token)\n })\n\n # Extract just the business info.\n return response.json()['businesses']", "def get_restaurants():\n restaurants = []\n start = 0\n\n while(True):\n response = requests.get(REQUEST_URL + \"&start=\" + str(start), \\\n headers=HEADERS)\n response_body = json.loads(response.text)\n if (response_body[\"results_shown\"] < 1):\n break\n \n restaurants += response_body[\"restaurants\"] \n start += 20\n\n return restaurants", "def getCatalogs():", "def read_foods(foods_txt):\n foods = []\n for line in foods_txt:\n ingredients_txt, allergens_txt = line.split(\" (contains \")\n ingredients = ingredients_txt.split()\n allergens = allergens_txt[:-1].split(\", \")\n\n foods.append((ingredients, allergens))\n\n return foods", "def read_all():\r\n categories = Category.query.all()\r\n # Serialize the data for the response\r\n category_schema = CategorySchema(many=True)\r\n print('***********************************************************')\r\n return category_schema.dump(categories)", "def read_file(reviews):\n\n for review in reviews:\n yield json.loads(review)[\"text\"]", "def read_input_files(input_file: str) -> list[Food]:\n with open(input_file) as input_fobj:\n foods = [Food.from_raw(line.strip()) for line in input_fobj]\n return foods", "def getRestaurantAddresses(restaurants):\n addresslist = []\n for rest in restaurants:\n if 'address' in rest:\n addressstring = str(rest['address']) + ' ' + str(rest['city'])\n addresslist.append(addressstring)\n\n # pprint.pprint(addresslist)\n return addresslist", "def load_restaurants():\n try:\n with open(CACHE_FILE) as infile:\n print(\"Cache found, loading from file {}\".format(CACHE_FILE))\n restaurants = json.load(infile)\n except Exception:\n print(\"No cache found, loading from API\")\n restaurants = get_restaurants()\n with open(CACHE_FILE, 'w+') as outfile:\n json.dump(restaurants, outfile)\n return restaurants\n return restaurants", "def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")", "def load_restaurants(city):\n session = connect_db()\n # Start offset at 0 to return the first 20 results from Yelp API request\n offset = 0\n\n # Get total number of restaurants for this city\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n result_len = 20\n \n # Get all restaurants for a city and load each restaurant into the database\n # Note: Yelp has a limitation of 1000 for accessible results, so get total results\n # if less than 1000 or get only 1000 results back even if there should be more\n while (1000 > offset) and (result_len==20):\n results = search(bearer_token, 'restaurant', city, offset)\n result_len = len(results['businesses'])\n\n # API response returns a SearchResponse object with accessible attributes\n # response.businesses returns a list of business objects with further attributes\n for business in results['businesses']:\n biz = get_business(bearer_token, business['id'])\n try:\n table.insert(biz)\n except DuplicateKeyError:\n 
print 'DUPS!'\n\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n try:\n yelp_price_level = biz['price']\n except:\n yelp_price_level = None\n try:\n hours_type = biz['hours'][0]['hours_type']\n is_open_now = biz['hours'][0]['is_open_now']\n for item in biz['hours'][0]['open']:\n if item['day'] == 1:\n hour_start_tuesday = item['start']\n hour_end_tuesday = item['end']\n elif item['day'] == 0:\n hour_start_monday = item['start']\n hour_end_monday = item['end']\n elif item['day'] == 2:\n hour_start_wednesday = item['start']\n hour_end_wednesday = item['end']\n elif item['day'] == 3:\n hour_start_thursday = item['start']\n hour_end_thursday = item['end']\n elif item['day'] == 4:\n hour_start_friday = item['start']\n hour_end_friday = item['end']\n elif item['day'] == 5:\n hour_start_saturday = item['start']\n hour_end_saturday = item['end']\n elif item['day'] == 6:\n hour_start_sunday = item['start']\n hour_end_sunday = item['end']\n except:\n hours_type = None\n is_open_now = None\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n restaurant = Restaurant(\n yelp_id = business['id'],\n yelp_rating = biz['rating'],\n yelp_review_count = biz['review_count'],\n name = biz['name'],\n phone = biz['phone'],\n yelp_url = biz['url'],\n yelp_price_level = yelp_price_level,\n latitude = biz['coordinates']['latitude'],\n longitude = biz['coordinates']['longitude'],\n hours_type = hours_type,\n is_open_now = is_open_now,\n hour_start_monday = hour_start_monday,\n hour_end_monday = hour_end_monday,\n hour_start_tuesday = hour_start_tuesday,\n hour_end_tuesday = hour_end_tuesday,\n hour_start_wednesday = hour_start_wednesday,\n hour_end_wednesday = hour_end_wednesday, \n hour_start_thursday = hour_start_thursday,\n hour_end_thursday = hour_end_thursday, \n hour_start_friday = hour_start_friday,\n hour_end_friday = hour_end_friday, \n hour_start_saturday = hour_start_saturday,\n hour_end_saturday = hour_end_saturday, \n hour_start_sunday = hour_start_sunday,\n hour_end_sunday = hour_end_sunday, \n is_closed = biz['is_closed'],\n categories = biz['categories'][0]['alias'],\n display_phone = biz['display_phone'],\n location = ' '.join(biz['location']['display_address']),\n location_city = biz['location']['city'],\n location_state = biz['location']['state'],\n location_zip_code = biz['location']['zip_code'],\n location_city_id = biz['location']['city'] + ', ' + biz['location']['state'])\n session.merge(restaurant)\n # Yelp returns only 20 results each time, so need to offset by 20 while iterating\n offset += 20\n print('current offset: ', offset)\n session.commit()", "def read_all():\n # Query the database for all the movies\n movies = Movies.query.order_by(db.asc(Movies.id)).limit(100).all()\n\n # Serialize the list of movies from our data\n movies_schema = MoviesSchema(many=True)\n data = movies_schema.dump(movies)\n return data", "def read (self, 
path):\n\n\t\tself.data = []\n\t\t# print \"*** path: %s***\" % path\n\t\tdir, filename = os.path.split (path)\n\t\troot, ext = os.path.splitext (filename)\n\t\t# encoding = 'ISO-8859-1' # utf-8\n\t\ts = codecs.open(path,'r', self.encoding).read()\n\t\t## s = unicode(f.read(),'utf-8')\n\t\ts = self.preprocess (s)\n\t\tlines = split (s, self.linesep)\n\t\tschema = self.splitline(lines[0])\n\n\t\t## print \"** %s **\" % os.path.splitext(filename)[0]\n\t\tif self.verbose:\n\t\t\tprint \"read %d lines from %s\" % (len(lines), path)\n\n\t\tfor i in range(1,len(lines)):\n\t\t\tif not lines[i].strip(): \n\t\t\t\t# print 'skipping line (%d)' % i\n\t\t\t\tcontinue\n\t\t\tfields = self.splitline(lines[i])\n\t\t\titem = self.entry_class (fields, schema)\n\t\t\tif self.accept (item):\n\t\t\t\tself.add (item)\n\n\t\tself.schema = schema\n\t\t# self.data.sort (lastNameCmp)", "def __loadFromFile(self):\n try:\n f=open(self.__fileR, \"r\")\n line =f.readline().strip()\n rez=[]\n while line!=\"\":\n attrs=line.split(\",\")\n rt=Rent(attrs[0], attrs[1], attrs[2], attrs[3])\n rez.append(rt)\n line=f.readline().strip()\n f.close()\n return rez\n #the file cannot be reached\n except IOError:\n return None", "def collect_data(self, data: Restaurant) -> Restaurant:\n print('-' * 40)\n print(f'{data.name} | {data.pnr}')\n params = {\n 'produ': data.pnr,\n 'country': 'dk',\n 'token': FilterXMLConfig.cvrapi_api_key()\n }\n headers = {\n 'User-Agent': 'sw814f21 - FindSmiley app - Jonas Andersen'\n }\n\n res = get(self.URL, params=params, headers=headers)\n content = json.loads(res.content.decode('utf-8'))\n\n if res.status_code == 200:\n for appender in self.appenders:\n data = appender(content, data)\n else:\n print(f'Skipping restaurant with p-nr {data.pnr}: record not found remotely')\n\n return super().collect_data(data)", "def loadTaxi(file):\n arr = []\n with open(file, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row['fare_amount'] = float(row['fare_amount'])\n row['pickup_longitude'] = float(row['pickup_longitude'])\n row['pickup_latitude'] = float(row['pickup_latitude'])\n row['dropoff_longitude'] = float(row['dropoff_longitude'])\n row['dropoff_latitude'] = float(row['dropoff_latitude'])\n row['pickup_datetime'] = datetime.strptime(\n row['pickup_datetime'], '%Y-%m-%d %H:%M:%S %Z')\n arr.append(row)\n\n inserted_ids = db.taxi.insert_many(arr).inserted_ids\n print(\"{} taxi docs inserted\".format(len(inserted_ids)))", "def load_foods(self, file_name):\n with open(file_name, 'r') as input_file:\n input_reader = csv.reader(input_file)\n for row in input_reader:\n # check if the row is a properly formatted csv line, deals with blank lines in the file\n if row:\n try:\n self.foods.append(FoodItem(row[0], row[1], row[2]))\n except IndexError:\n self.foods.append(FoodItem(row[0], row[1]))", "def load_items(self, filename):\n with open(filename, \"r\") as f:\n itemss = []\n for line in f:\n line = line.strip()\n # Add name, description and initial location to each item object\n if line.upper():\n name = line\n line = f.readline()\n line = line.strip()\n description = line\n line = f.readline()\n line = line.strip()\n initial_room_id = line\n item = Item(name, description, initial_room_id)\n itemss.append(item)\n line = f.readline()\n return itemss", "def process_file(file_name):\n \n restaurant_ratings = {}\n \n # open file, iterate line by line\n restaurant_file = open(file_name)\n # split by colon, returns a list\n for line in restaurant_file:\n restaurant_name, 
restaurant_rating = line.rstrip().split(\":\")\n restaurant_ratings[restaurant_name] = int(restaurant_rating)\n\n # close file\n restaurant_file.close()\n return restaurant_ratings" ]
[ "0.7021534", "0.6264984", "0.5770384", "0.5722723", "0.56450224", "0.564491", "0.5568687", "0.5530623", "0.551046", "0.5477675", "0.54734993", "0.5457776", "0.54333824", "0.5407601", "0.5406789", "0.5387293", "0.538584", "0.53767186", "0.53694475", "0.5349914", "0.53391904", "0.53100693", "0.52999383", "0.5279023", "0.5262346", "0.5262048", "0.52387017", "0.52256894", "0.5223813", "0.521606" ]
0.67553145
1
Checks if the first column of a CSV file appears to contain timestamps. Refactored to include the __csv_process_header functionality.
def csv_has_timestamps(file_name: str, sample_size: int) -> (bool, int, list): # keep track of how many entries pandas # can and can't parse as datetime confirmed_timestamps = 0 confirmed_non_timestamps = 0 header_lines = 0 columns_list = ["Timestamp", "Value"] with open(file_name) as csvfile: for i in range(sample_size): try: # read each line from the file, prepare it, # then pass it to pandas.to_datetime() # if it can be parsed, we've encountered a timestamp entry line = csvfile.readline() line = line.rstrip() pd.to_datetime(line.split(",")[0]) confirmed_timestamps += 1 except ValueError: # pandas.to_datetime() will return ValueError if it can't # parse the argument, # this means we've encountered a non-timestamp entry confirmed_non_timestamps += 1 # if the first line can't be parsed, # until we confirm timestamps or not, # take this line as the column headers if header_lines == 0: columns_list = line.split(",") # keep track of how many non-data lines header_lines += 1 # very simple check: if we encountered more timestamps than not, # the primary entry for the first column must be timestamps if confirmed_timestamps > confirmed_non_timestamps: return True, header_lines, columns_list else: return False, header_lines, columns_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _guess_time_format(self, csv_file):\n import csv\n\n fmt_lst = ['%Y/%m/%d %H:%M', '%Y-%m-%d %H:%M:%S']\n\n fmt_found = None\n\n with open(csv_file,'r') as fp:\n reader = csv.DictReader(fp)\n for i,line in enumerate(reader):\n for k,v in line.iteritems():\n if k.find('date')>=0: #-- this should be the date column\n date_str = v\n break\n if i>0:\n break\n\n msg = \"found first date in file ---{}---\".format(v)\n FileLogger.info(msg)\n\n for fmt in fmt_lst:\n try:\n dt.datetime.strptime(date_str,fmt)\n fmt_found = fmt\n break\n except ValueError:\n pass\n\n msg = \"detected time-format '{}'\".format(fmt_found)\n FileLogger.info(msg)\n\n return fmt_found", "def test_timestamp_not_found(self, l):\n extract_columns(data=self.data, columns=['a'], timestamps=['timestamp'])\n l.check(\n ('pynts.util', 'WARNING', \"Couldn't find timestamps '['timestamp']' in data, using 'ts' instead\"),\n )", "def has_timestamp(self):\n return (self.data_type() & 0x100 == 0x100) and (self.raw_data_length() >= 8)", "def has_header_row(filepath, skiprows=[0]):\n if skiprows:\n skiprows=max(skiprows)\n else:\n skiprows = 0\n with open(filepath) as f:\n reader = csv.reader(f)\n\n data = list(reader)[skiprows:skiprows+10] # don't look beyond line 10\n \n def isnumber(item):\n try:\n float(item)\n return True\n except:\n return False\n \n first_line = data[0]\n second_line = data[1]\n \n first_line = [isnumber(item) for item in first_line]\n second_line = [isnumber(item) for item in second_line]\n \n\n if not first_line == second_line:\n return 0\n else:\n return None", "def check_valid_csv_header(self, row):\n obj = re.match(re.compile('^Year\\,Month\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Headers must be `Year` `Month` Check Sample file\")", "def test_no_timestamp(self):\n self.assertRaises(PyntsError, extract_columns, self.data[['a', 'b']], ['a'], ['timestamp'])", "def timestamp_line(content):\n return re.match(r\"((\\d\\d:){2}\\d\\d),(\\d{3}) --> ((\\d\\d:){2}\\d\\d),(\\d{3})\", content) is not None", "def _is_probably_old_datfile_format(raw_data):\n return not _is_probably_new_datfile_format(raw_data) and \"UTC\" in raw_data", "def check_date_column(cls, line):\n\n try:\n date_val = cls.get_field(line, cls._DATE_COL_NAME)\n except (ValueError, IndexError, KeyError) as exc:\n logging.warning(exc)\n return False\n else:\n return date_val is not None", "def has_header_row(self, strdata):\n debug = False\n comma_dec_sep_ok = True\n if debug: print(strdata)\n if len(strdata) < 2: ## a header row needs a following row to be a header\n return False\n row1_types = [lib.get_val_type(val, comma_dec_sep_ok) \n for val in strdata[0]]\n row2_types = [lib.get_val_type(val, comma_dec_sep_ok) \n for val in strdata[1]]\n str_type = mg.VAL_STRING\n empty_type = mg.VAL_EMPTY_STRING\n non_str_types = [mg.VAL_DATE, mg.VAL_NUMERIC]\n return importer.has_header_row(\n row1_types, row2_types, str_type, empty_type, non_str_types)", "def datetime_column(filepath, skiprows, skipcolumns):\n df = pd.read_csv(filepath, skiprows=skiprows)\n df = df.drop(columns = skipcolumns)\n# df = df.head(10)\n \n# return df\n\n def try_parse(df):\n# print(df.iloc[1, :])\n # try parsing some rows from each column as date\n head = df.head()\n tail = df.tail()\n for column in df.columns:\n try:\n# print(dateutil.parser.parse(df[column].iloc[-1]))\n dt_head = dateutil.parser.parse(head[column].iloc[-1])\n dt_tail = dateutil.parser.parse(tail[column].iloc[-1])\n# print('possible datetime')\n# if not date.time() == 
datetime.time():\n if not dt_head.time() == dt_tail.time():\n if not dt_head.date() == dt_tail.date():\n # time seems to be present (not default parser value)\n return column\n except:\n continue\n return None\n \n # try without modifying values\n rv = try_parse(df=df)\n if rv:\n return rv\n \n # try modifying values\n chars = ['-', '_', '/', '#']\n for char in chars:\n dfc = df.copy()\n for col in dfc.columns:\n try:\n dfc[col] = dfc[col].str.split(char).str.join(' ')\n except:\n pass # will only work for str type\n# print(char, dfc.iloc[1, :])\n rv = try_parse(df=dfc)\n if rv:\n return rv", "def splitTS(TSfile,csvNAME='TS1',dirname='./',prefix=\"@\",\n BEG=None,END=None):\n\n dfts=pd.read_csv(TSfile,sep=\" \",index_col=0)\n dfts.columns = pd.to_datetime(dfts.columns)\n\n cols=dfts.columns[np.logical_and(dfts.columns >= pd.to_datetime(BEG),\n dfts.columns <= pd.to_datetime(END))]\n\n dfts=dfts[cols]\n\n\n for row in dfts.index:\n dfts.loc[[row]].to_csv(dirname+\"/\"+prefix+row,header=None,index=None,sep=\" \")\n\n return", "def table_has_any_timestamp_fields(table_object) -> bool:\n mapper = sqlalchemy.inspect(table_object)\n for column in mapper.all_orm_descriptors:\n try:\n if isinstance(column.type, PSQL_TIMESTAMP) or isinstance(column.type, SQLITE_TIMESTAMP):\n return True\n except Exception:\n pass\n return False", "def load_obs_csv(self, csv_file, date_fmt=\"%Y/%m/%d %H:%M\", mission_lst=None, only_geom=False):\n\n try:\n obs_data = np.loadtxt(csv_file, delimiter=',', dtype='str')\n msg = \"observation data loaded from file ***{}***\".format(csv_file)\n FileLogger.info(msg)\n except IOError as exc:\n msg = \"could not load observations from csv file ***{}***\".format(csv_file)\n msg += \" ({})\".format(exc)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n\n nt,ncol = obs_data.shape\n date_lst = [ dt.datetime.strptime(obs_data[i,0], date_fmt) for i in xrange(nt) ]\n date_a = np.array(date_lst)\n time_start_data = date_lst[0]\n time_end_data = date_lst[-1]\n #-- logging\n msg = \"detected ntimepts={} #columns={} in csv file\".format(nt, ncol)\n FileLogger.info(msg)\n\n #-- potential adjustment to specified temporal domain\n if self.time_start!=None:\n time_start = self.time_start\n else:\n time_start = time_start_data\n if self.time_end!=None:\n time_end = self.time_end\n else:\n time_end = time_end_data\n\n #-- first 8 columns are always:date, vza, vaa, sza, saa, sat_flag, lat, lon\n\n if ncol==10:\n msg = \"start reading S1 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, vh, vv\n vh_lst = []\n vv_lst = []\n self.obs_dct['S1'] = ObsTable()\n self.obs_dct['S1'].geom = satgeo.SensorGeometry()\n self.obs_dct['S1'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S1'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon,lat 
(columns 5,6) not needed\n #-- satellite flag (column 7)\n self.obs_dct['S1'].sat_id_lst.append(act_mission)\n #-- VH,VV in 0-indexed columns 8,9\n vh_lst.append( float(obs_data[i,8]) )\n vv_lst.append( float(obs_data[i,9]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n\n #-- turn into arrays\n vh = np.array(vh_lst)\n vv = np.array(vv_lst)\n #-- logging\n msg = \"observational backscatter values are assumed to be in linear units!\"\n FileLogger.info(msg)\n msg = \"VH backscatter values read: VH[linear] min/max={}/{}\".format(\n vh.min(), vh.max())\n FileLogger.info(msg)\n msg = \"VV backscatter values read: VV[linear] min/max={}/{}\".format(\n vv.min(), vv.max())\n FileLogger.info(msg)\n #-- uncertainty computation\n #-- XX_db = XX_db(XX) = 10*log10(XX)\n #-- XX = XX(XX_db) = 10**(XX_db/10)\n #\n # for the uncertainty in linear/raw unit we apply conservative estimation:\n # 2*sXX = [ XX(XX_db+sXX_db) - XX(XX_db-sXX_db) ] (XX=VH,VV)\n # = [ XX(XX_db)*10**(sXX_db/10.) - XX(XX_db)*10**(-sXX_db/10.)]\n # = XX(XX_db)*[10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n # = XX * [10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n ds = 0.5* (10**(self.s1_unc_db/10.) - 10**(-1*self.s1_unc_db/10.))\n #-- S1 uncertainty floor *may* be user-supplied\n if self.s1_vv_uncfloor!=None:\n dsvv_floor = self.s1_vv_uncfloor\n else:\n dsvv_floor = 10**(self.s1_floor_db/10.)*ds\n if self.s1_vh_uncfloor!=None:\n dsvh_floor = self.s1_vh_uncfloor\n else:\n dsvh_floor = 10**(self.s1_floor_db/10.)*ds\n msg = \"assuming S1 observational uncertainty of {} [dB] \".format(self.s1_unc_db)\n msg += \"yields relative uncertainty of {} [linear unit].\".format(ds)\n FileLogger.info(msg)\n msg = \"assuming vv={} vh={} S1 observational uncertainty floor [linear unit].\".format(\n dsvv_floor, dsvh_floor)\n FileLogger.info(msg)\n svh = np.maximum(vh*ds, dsvh_floor)\n svv = np.maximum(vv*ds, dsvv_floor)\n #-- apply floor value\n nlo_svh = np.count_nonzero(vh*ds<dsvh_floor)\n nlo_svv = np.count_nonzero(vv*ds<dsvv_floor)\n svh = np.maximum(svh, dsvh_floor)\n svv = np.maximum(svv, dsvv_floor)\n msg = \"number of applied uncertainty floor values on VH={} VV={}\".format(\n nlo_svh, nlo_svv)\n FileLogger.info(msg)\n msg = \"determined VH uncertainty in linear units, min/max={}/{}\".format(\n svh.min(), svh.max())\n FileLogger.info(msg)\n msg = \"determined VV uncertainty in linear units, min/max={}/{}\".format(\n svv.min(), svv.max())\n FileLogger.info(msg)\n #-- potential filtering of polarisations\n if not self.s1_pol is None:\n if not 'VH' in self.s1_pol:\n vh = self.obs_fill_value\n svh = self.obs_fill_value\n if not 'VV' in self.s1_pol:\n vv = self.obs_fill_value\n svv = self.obs_fill_value\n #-- \n nt_use = len(sat_geom.date_utc)\n self.obs_dct['S1'].data = np.empty((nt_use,2), dtype=np.float64) #-- 'VH','VV'\n self.obs_dct['S1'].data[:,0] = vh\n self.obs_dct['S1'].data[:,1] = vv\n self.obs_dct['S1'].dataunc = np.empty((nt_use,2), dtype=np.float64)\n self.obs_dct['S1'].dataunc[:,0] = svh\n self.obs_dct['S1'].dataunc[:,1] = svv\n #-- logging\n msg = \"...reading S1 observations DONE\"\n FileLogger.info(msg)\n else:\n #-- logging\n msg = \"start reading S2 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, BRF1,...,BRF13\n self.obs_dct['S2'] = ObsTable()\n self.obs_dct['S2'].geom = satgeo.SensorGeometry()\n self.obs_dct['S2'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S2'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n 
sat_geom.saa = []\n brf_lst = [ [] for i in xrange(NB_S2) ] #-- prepare lists for 13 BRF bands\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon/lat in columns 5, 6 not used here\n #-- satellite flag\n self.obs_dct['S2'].sat_id_lst.append(obs_data[i,7])\n #-- BRFs start at 0-indexed column 8 in data csv file\n for ib in xrange(NB_S2):\n icol = ib+8\n brf_lst[ib].append( float(obs_data[i, icol]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n #--\n nt_use = len(sat_geom.date_utc)\n brf_data = np.empty((nt_use,NB_S2), dtype=np.float64) #-- BRF1-13\n for ib in xrange(NB_S2):\n brf_data[:,ib] = np.array(brf_lst[ib])\n #-- check observational consistency\n nneg = np.count_nonzero( brf_data<0 )\n if nneg>0:\n msg = \"detected negative BRF values: nneg={}.\".format(nneg)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data<0 ] = self.obs_fill_value\n nhi = np.count_nonzero( brf_data>1 )\n if nhi>0:\n msg = \"detected high BRF outlier values>1: nout={}.\".format(nhi)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data>1 ] = self.obs_fill_value\n\n #-- data uncertainty\n msg = \"BRF uncertainty is derived by applying {} relative uncertainty, \".format(\n self.s2_relunc)\n msg += \"and an uncertainty floor value of {}\".format(self.s2_uncfloor)\n FileLogger.info(msg)\n brf_dataunc = np.maximum(brf_data*self.s2_relunc, self.s2_uncfloor)\n brf_dataunc[ brf_dataunc<0 ] = self.obs_fill_value\n brf_dataunc[ brf_data==self.obs_fill_value ] = self.obs_fill_value\n #-- restriction to seleted bands\n if not self.s2_bnds is None:\n bnd_msk = np.ones((NB_S2,), dtype=np.bool)*True\n bnd_msk[self.s2_bnds] = False\n brf_data[:,bnd_msk] = self.obs_fill_value\n brf_dataunc[:,bnd_msk] = self.obs_fill_value\n #-- set into structure\n self.obs_dct['S2'].data = brf_data\n self.obs_dct['S2'].dataunc = brf_dataunc\n #-- logging\n msg = \"...reading S2 observations DONE\"\n FileLogger.info(msg)", "def test_with_header_no_schema(self):\n # inferedschema should use first line of the csv as col names\n frame = self.context.frame.import_csv(self.dataset, header=True)\n expected_schema = [(\"1\", int), (\"a\", str), (\"2\", int)]\n self.assertEqual(frame.schema, expected_schema)", "def _checkTimestamp(self, acquisition_time):\n\n #\n # Check for None\n #\n if acquisition_time is None:\n raise Exception('Invalid acquisition_time {acquisition_time}'.\n format(acquisition_time =acquisition_time))\n\n #\n # Do the conversion\n # \n acquisition_time_candidate = (parser.parse(acquisition_time)).timetuple()\n\n #\n # Check several values\n # \n if acquisition_time_candidate.tm_year < 2015:\n raise Exception('Invalid year {year} in acquisition time {acquisition_time}'.\n format(year = acquisition_time_candidate.tm_year, acquisition_time =acquisition_time))\n\n #\n # Return if it passed all tests\n #\n return 
acquisition_time_candidate", "def _lines_have_temperature(self, line):\r\n\r\n return len(line.split(sep=',')) == 3", "def validate_timestamp(column_name, value, date_format, column_data_type=\"timestamp\"):\n value = value.replace(\"T\", \" \")\n date_format = date_format.replace(\"T\", \" \")\n date_value, time_value = value.split(\" \")\n format_firstpart, format_secondpart = date_format.split('tzh')\n if \"-\" in time_value:\n time_value, tz_value = time_value.split(\"-\")\n value = \"{0} {1}\".format(date_value, time_value)\n\n if time_value is not None and tz_value is not None:\n try:\n datetime.strptime(value, format_firstpart)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n elif \"+\" in time_value:\n time_value, tz_value = time_value.split(\"+\")\n value = \"{0} {1}\".format(date_value, time_value)\n if time_value is not None and tz_value is not None:\n try:\n datetime.strptime(value, format_firstpart)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def _validate_time(self, col):\r\n error_rows = [] # initialize list of rows with errors\r\n # Loop through data and validate time values\r\n for i, row in enumerate(self.rows):\r\n csv_row = i + 1\r\n time_of_survey = row[col]\r\n time24hr_pattern = \"^(2[0-3]|[01]?[0-9]):([0-5]?[0-9]):([0-5]?[0-9])$\"\r\n time12hr_pattern = \"^(1[0-2]|0?[1-9]):([0-5]?[0-9]):([0-5]?[0-9])( ?[AP]M)?$\"\r\n\r\n if \"M\" in time_of_survey:\r\n if not re.search(time12hr_pattern, time_of_survey):\r\n error_rows.append(csv_row)\r\n else:\r\n if not re.search(time24hr_pattern, time_of_survey):\r\n error_rows.append(csv_row)\r\n return error_rows", "def tzdata_filter(line: str) -> bool:\n if line and line[0] == 'Z' :\n return True\n return False", "def _validate_date(self, col):\r\n error_rows = [] # initialize list of rows with errors\r\n # Loop through data and validate time values\r\n for i, row in enumerate(self.rows):\r\n csv_row = i + 1\r\n date_of_survey = row[col]\r\n try:\r\n [m, d, y] = date_of_survey.split('/')\r\n testdate = datetime.date(int(y), int(m), int(d))\r\n except:\r\n error_rows.append(csv_row)\r\n return error_rows", "def _check_start_timestamp(self):\n if self.descriptor.type in (\n metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,\n metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,\n metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,\n ):\n for ts in self.time_series:\n if ts.start_timestamp is None:\n raise ValueError(\"time_series.start_timestamp must exist \"\n \"for cumulative metrics\")", "def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")", "def _get_datetime_from_header(header: List[str]) -> Optional[datetime.datetime]:\n date_time = None\n for line in header:\n if date_time is not None:\n break\n matches = re.findall(r'\\d{2}/\\d{2}/\\d{4} \\d{2}:\\d{2}:\\d{2}.\\d+', line)\n if len(matches) == 0:\n continue\n elif len(matches) == 1:\n date_time = datetime.datetime.strptime(matches[0], '%d/%m/%Y %H:%M:%S.%f')\n else:\n raise ValueError('Too many date time values in line')\n\n return date_time", "def is_header(fields):\n if len(fields) < 11:\n return None\n # Test a column which should usually be a number in data lines and never a number in header lines.\n try:\n float(fields[8])\n 
return False\n except ValueError:\n pass\n first_field = fields[0]\n # An explicitly commented line is a header.\n if first_field.startswith('#'):\n return True\n # The first field in a header is usually these two (and never these in data lines).\n if first_field.lower() == 'sample' or first_field.lower() == 'family':\n return True\n # Fallback 1: There should never be a number in a header line. If we find one, it's a data line.\n for field in fields:\n try:\n float(field)\n return False\n except ValueError:\n pass\n # Fallback 2: Just test whether any of the known labels is in the line.\n for label in LABELS:\n if label in fields:\n return True\n for label in LABELS:\n if label.lower() in fields:\n return True", "def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())", "def is_header(line):\n return line[0] == '>'", "def VerifyRow(self, parser_mediator, row):\n if len(row) != self._NUMBER_OF_COLUMNS:\n return False\n\n # If the date and time string cannot be converted into a date time object,\n # then do not consider this to be a McAfee AV Access Protection Log.\n try:\n self._CreateDateTime(row['date'], row['time'])\n except errors.ParseError:\n return False\n\n # Use the presence of these strings as a backup or in case of partial file.\n status = row['status']\n if 'Access Protection' not in status and 'Would be blocked' not in status:\n return False\n\n return True", "def test_csv_reader_header_fields(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n header_fields = list(data[0].keys())\n assert header_fields == [\n 'Country',\n 'City',\n 'State_Or_Province',\n 'Lat',\n 'Long',\n 'Altitude'\n ]", "def use_long_headers(header_row, long_to_short_dict):\n col_matches = 0\n for value in header_row:\n if FieldCleaner.clean_string(value) in long_to_short_dict:\n col_matches += 1\n # if most of column headers are in the long format,\n # we'll treat the file as having long headers\n return col_matches > .5 * len(header_row)" ]
[ "0.640366", "0.63820136", "0.6264246", "0.61585444", "0.6134612", "0.6077781", "0.6066581", "0.5915225", "0.5872344", "0.57468706", "0.5743505", "0.5737332", "0.57336193", "0.57112074", "0.5702082", "0.56855357", "0.56678337", "0.56237334", "0.5601129", "0.55361897", "0.5533232", "0.54851115", "0.547718", "0.5460599", "0.54570055", "0.54523575", "0.54422045", "0.5440466", "0.5437143", "0.5421179" ]
0.7445921
0
Only called when there are 3 columns of data. This function checks whether the second column of the DataFrame's Python list is comprised of integers, parsable pandas.Timestamp strings, or something not accepted as time-related. In either of the first two cases, the second column is reassigned so it can be parsed as the time of a pandas.Timestamp object. If neither of these cases is possible, a TypeError is raised.
def __process_times(raw_array: list, num_row: int): # format strings used in datetime.time().strftime() full_time_format = '%H:%M:%S' hours_time_format = '%H:00:00' minutes_time_format = '00:%M:00' seconds_time_format = '00:00:%S' # booleans for telling what the integers in the second column represent integer_hours = False integer_minutes = False integer_seconds = False # boolean for if the second column is comprised of integers integer_timestamps = False # this loop breaks as soon as it finds an integer in the second column # if no integer is found, we try to parse it as a pandas.Timestamp, # if this fails, the entry is not something we can parse for i in range(num_row): time_string = raw_array[i][1] if time_string.isnumeric(): integer_timestamps = True break try: time = pd.Timestamp(time_string) time = time.strftime(full_time_format) raw_array[i][1] = time except ValueError: sys.stdout.write(f"ERROR: {time_string}" f" cannot be parsed as a time value\n") raise TypeError # this will be the first thing we run into after # encountering an integer in the previous loop if integer_timestamps: # we need to find what the maximum value is in the second column # to decide if it represents seconds, minutes, or hours max_value = 0 min_value = 1 # find the max and min value until the max value wraps around to 0 # when this happens: # if max is 60, we're in minutes # if max is 24, we're in hours # else, we're in seconds for i in range(num_row): number = int(raw_array[i][1]) if number < min_value: min_value = 0 if number > max_value: max_value = number else: if number == min_value: if (max_value == 60 and min_value == 1) \ or (max_value == 59 and min_value == 0): integer_minutes = True elif (max_value == 24 and min_value == 1) \ or (max_value == 23 and min_value == 0): integer_hours = True break # it's possible that we reached the end of the file # without looping around to 0, in this case, the max_value # was never reached so we assume this column represents seconds # it is also possible that we wrapped around at 0 and # just didn't meet the conditions to consider the row # as minutes or hours. We'll still use seconds in this case if max_value > 60: integer_seconds = True # basic switch case for putting each entry of # second column into the desired format if integer_seconds: for i in range(num_row): number = int(raw_array[i][1]) raw_array[i][1] = \ datetime.time(number).strftime(seconds_time_format) elif integer_minutes: for i in range(num_row): number = int(raw_array[i][1]) raw_array[i][1] = \ datetime.time(number).strftime(minutes_time_format) elif integer_hours: for i in range(num_row): number = int(raw_array[i][1]) raw_array[i][1] = \ datetime.time(number).strftime(hours_time_format) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_timestamp(column_name, value, date_format, column_data_type=\"timestamp\"):\n value = value.replace(\"T\", \" \")\n date_format = date_format.replace(\"T\", \" \")\n date_value, time_value = value.split(\" \")\n format_firstpart, format_secondpart = date_format.split('tzh')\n if \"-\" in time_value:\n time_value, tz_value = time_value.split(\"-\")\n value = \"{0} {1}\".format(date_value, time_value)\n\n if time_value is not None and tz_value is not None:\n try:\n datetime.strptime(value, format_firstpart)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n elif \"+\" in time_value:\n time_value, tz_value = time_value.split(\"+\")\n value = \"{0} {1}\".format(date_value, time_value)\n if time_value is not None and tz_value is not None:\n try:\n datetime.strptime(value, format_firstpart)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def __convert_timestamps(data_frame: pd.DataFrame) -> pd.DataFrame:\n\n # get 2D array of data_frame as python builtin list,\n # get columns list to use for second pd.DataFrame constructor\n raw_array = data_frame.to_numpy(dtype=str).tolist()\n columns_list = data_frame.columns.tolist()\n\n # num_rows used to iterate through array,\n # num_cols used for naive error checking\n num_rows = len(raw_array)\n num_cols = len(raw_array[0])\n\n # here is why it is important to error check DataFrames\n # for correct dimensions before passing to this function\n # __convert_two and convert_tree_cols() both have\n # a different process and could hit errors or produce undefined behavior\n if num_cols == 3:\n __convert_three_cols(raw_array, num_rows)\n # convert_three_cols deletes a column, update our list of column names\n columns_list.remove(columns_list[1])\n elif num_cols == 2:\n __convert_two_cols(raw_array, num_rows)\n else:\n sys.stdout.write(f\"ERROR: GIVEN CSV FILE MUST CONTAIN TWO\"\n f\" OR THREE COLUMNS (NOT A TIME SERIES)\\n\"\n f\"(files with three columns are assumed to have date\"\n f\" in the first column and a time in the second)\\n\")\n raise NotImplementedError\n\n # once columns have been processed, use array and list of column names\n # to reassign data_frame to a new constructor\n data_frame = pd.DataFrame(data=raw_array, columns=columns_list)\n\n return data_frame", "def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')", "def check_data_type_column_data(X):\n if type(X) is not numpy.ndarray:\n raise TypeError(\"X should be type numpy.ndarray\")\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n raise TypeError(\"X should have a single column.\")", "def test_no_timestamp(self):\n self.assertRaises(PyntsError, extract_columns, self.data[['a', 'b']], ['a'], ['timestamp'])", "def test_timestamp_not_found(self, l):\n extract_columns(data=self.data, columns=['a'], timestamps=['timestamp'])\n l.check(\n ('pynts.util', 'WARNING', \"Couldn't find timestamps '['timestamp']' in data, using 'ts' instead\"),\n )", "def _maybe_dt_data(self, data, feature_names, feature_types,\n meta=None, meta_type=None):\n if meta and data.shape[1] > 1:\n raise ValueError(\n 'DataTable for label or weight cannot have multiple columns')\n if meta:\n # below requires new dt version\n # extract first column\n data = 
data.to_numpy()[:, 0].astype(meta_type)\n return data, None, None\n\n data_types_names = tuple(lt.name for lt in data.ltypes)\n bad_fields = [data.names[i]\n for i, type_name in enumerate(data_types_names)\n if type_name not in self.dt_type_mapper]\n if bad_fields:\n msg = \"\"\"DataFrame.types for data must be int, float or bool.\n Did not expect the data types in fields \"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None and meta is None:\n feature_names = data.names\n\n # always return stypes for dt ingestion\n if feature_types is not None:\n raise ValueError(\n 'DataTable has own feature types, cannot pass them in.')\n feature_types = np.vectorize(self.dt_type_mapper2.get)(\n data_types_names)\n\n return data, feature_names, feature_types", "def _check_dtype(self):\n\n # assert valid dtype\n if self.dtype not in PRIMITIVE_TYPES:\n raise ValueError(\"Type '{}' is invalid. Following types are \"\n \"allowed: {}\"\n .format(self.dtype, PRIMITIVE_TYPES.keys()))\n\n # assert valid dtypes for values\n allowed_types = PRIMITIVE_TYPES[self.dtype]\n\n for value in self.values:\n if not isinstance(value, allowed_types):\n raise TypeError(\"Column '{}' has invalud value '{}' with \"\n \"invalid type '{}'. Allowed types are: {}.\"\n .format(self.name,\n value,\n type(value),\n allowed_types))", "def format_column(data,col_num):\n assert isinstance(col_num,int) and col_num>=0\n \n if col_num in [0,1,11,13,14,15,16,17,19,20,21,28,31,45,46,47,48]: return data #emptry string will NOT return None\n if col_num in [2,3,12,18]: return type_cast(lambda x: int(float(x)),data)\n if col_num in [6,7,8,9,10,23,24,25,26,27,29,30]: return type_cast(float,data)\n if col_num in [4,5,22]: return type_cast(datetime.strptime,data,'%Y-%m-%d %H:%M:%S')\n if col_num in range(32,45):\n if data=='False': return False #bool('False') returns True!\n elif data=='True': return True\n else: return None", "def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')", "def datetime_column(filepath, skiprows, skipcolumns):\n df = pd.read_csv(filepath, skiprows=skiprows)\n df = df.drop(columns = skipcolumns)\n# df = df.head(10)\n \n# return df\n\n def try_parse(df):\n# print(df.iloc[1, :])\n # try parsing some rows from each column as date\n head = df.head()\n tail = df.tail()\n for column in df.columns:\n try:\n# print(dateutil.parser.parse(df[column].iloc[-1]))\n dt_head = dateutil.parser.parse(head[column].iloc[-1])\n dt_tail = dateutil.parser.parse(tail[column].iloc[-1])\n# print('possible datetime')\n# if not date.time() == datetime.time():\n if not dt_head.time() == dt_tail.time():\n if not dt_head.date() == dt_tail.date():\n # time seems to be present (not default parser value)\n return column\n except:\n continue\n return None\n \n # try without modifying values\n rv = try_parse(df=df)\n if rv:\n return rv\n \n # try modifying values\n chars = ['-', '_', '/', '#']\n for char in chars:\n dfc = df.copy()\n for col in dfc.columns:\n try:\n dfc[col] = dfc[col].str.split(char).str.join(' ')\n except:\n pass # will only work for str type\n# print(char, dfc.iloc[1, :])\n rv = try_parse(df=dfc)\n if rv:\n return rv", "def test_df_all_types():\n return pd.DataFrame({\n 'intcol': [1, 2],\n 'strcol': ['three', 'four'],\n 'floatcol': [5.0, 6.0],\n 'boolcol': [True, False],\n 'datetimecol': [\n np.datetime64('2020-01-01'), np.datetime64('2020-01-02')],\n })", "def fix_time_fields(self):\n time_fields = {\"Time of day\": 
lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def _checkTimestamp(self, acquisition_time):\n\n #\n # Check for None\n #\n if acquisition_time is None:\n raise Exception('Invalid acquisition_time {acquisition_time}'.\n format(acquisition_time =acquisition_time))\n\n #\n # Do the conversion\n # \n acquisition_time_candidate = (parser.parse(acquisition_time)).timetuple()\n\n #\n # Check several values\n # \n if acquisition_time_candidate.tm_year < 2015:\n raise Exception('Invalid year {year} in acquisition time {acquisition_time}'.\n format(year = acquisition_time_candidate.tm_year, acquisition_time =acquisition_time))\n\n #\n # Return if it passed all tests\n #\n return acquisition_time_candidate", "def test_daal_pca_bad_column_type(self):\n with self.assertRaisesRegexp(Exception, \"columns must be a list of strings\"):\n self.context.daaltk.models.dimreduction.pca.train(self.frame, 10, k=10)", "def __convert_two_cols(raw_array: list, num_rows: int):\n\n # no need to check extra row, we can go right into conversions\n for i in range(num_rows):\n\n # float conversion\n float_value = float(raw_array[i][1])\n raw_array[i][1] = float_value\n\n # no need to delete an extra entry,\n # we can just convert the existing string and assign it\n timestamp = pd.Timestamp(raw_array[i][0])\n raw_array[i][0] = timestamp\n\n return", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data", "def can_convert_to_column(obj):\n return is_column_like(obj) or cudf.api.types.is_list_like(obj)", "def check_numeric(data, col):\n from pandas.api.types import is_numeric_dtype\n try:\n if is_numeric_dtype(data[col]):\n logging.info(f' {col} is numeric.')\n return data\n else:\n numdata = (data\n .drop([col], axis=1)\n .join(data[col].apply(pandas.to_numeric, errors='coerce'))\n )\n numcol = numdata[col].isnull().values().sum()\n logging.warning(f' %s rows in %s are non-numeric' % (numcol, col,))\n logging.warning(f' {col} is tested by coercing into numeric values.')\n return numdata\n except:\n logging.error(f' the format of %s is not testable.' 
% (col,))\n print(data.head(n=2))\n sys.exit(1)", "def _is_all_int(df_list: List[Union[dd.DataFrame, pd.DataFrame]], col: str) -> bool:\n for df in df_list:\n if col in df.columns:\n srs = df[col]\n if isinstance(srs, (dd.DataFrame, pd.DataFrame)):\n for dtype in srs.dtypes:\n if not is_integer_dtype(dtype):\n return False\n elif isinstance(srs, (dd.Series, pd.Series)):\n if not is_integer_dtype(srs.dtype):\n return False\n else:\n raise ValueError(f\"unprocessed type of data:{type(srs)}\")\n return True", "def isInteger(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64", "def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None", "def _validate_time(self, col):\r\n error_rows = [] # initialize list of rows with errors\r\n # Loop through data and validate time values\r\n for i, row in enumerate(self.rows):\r\n csv_row = i + 1\r\n time_of_survey = row[col]\r\n time24hr_pattern = \"^(2[0-3]|[01]?[0-9]):([0-5]?[0-9]):([0-5]?[0-9])$\"\r\n time12hr_pattern = \"^(1[0-2]|0?[1-9]):([0-5]?[0-9]):([0-5]?[0-9])( ?[AP]M)?$\"\r\n\r\n if \"M\" in time_of_survey:\r\n if not re.search(time12hr_pattern, time_of_survey):\r\n error_rows.append(csv_row)\r\n else:\r\n if not re.search(time24hr_pattern, time_of_survey):\r\n error_rows.append(csv_row)\r\n return error_rows", "def _checkData(data: Sequence[HistoryElement]):\r\n if not all(x.timeStamp for x in data):\r\n raise ValueError(\"At least one element in data doesn't have a TimeStamp\")", "def check_cols(self):\n if self.ad_tab is not None and 'date' not in self.ad_cols:\n raise DataException(\"\"\"date column not found in adServer table.\"\"\")\n if self.ad_tab is not None and 'impressions' not in self.ad_cols:\n raise DataException(\"\"\"impressions column not found in adServer table.\"\"\")\n if 'timestamp' not in self.log_cols and 'date' not in self.log_cols:\n raise DataException(\"\"\"Both timestamp and date column missing from {t}\nCannot do dailyQA\"\"\".format(t=self.log_tab))\n if self.configs['hourshift'] != 0 or 'date' not in self.log_cols:\n if 'timestamp' not in self.log_cols:\n raise DataException(\"\"\"Time shift requested \\\nbut no timestamp column in {t}.\"\"\".format(t=self.log_tab))\n else:\n check_timestamp(self.configs['schema'], self.log_tab)", "def _validate_date(self, col):\r\n error_rows = [] # initialize list of rows with errors\r\n # Loop through data and validate time values\r\n for i, row in enumerate(self.rows):\r\n csv_row = i + 1\r\n date_of_survey = row[col]\r\n try:\r\n [m, d, y] = date_of_survey.split('/')\r\n testdate = datetime.date(int(y), int(m), int(d))\r\n except:\r\n error_rows.append(csv_row)\r\n return error_rows", "def validate_date(column_name, value, date_format, column_data_type=\"date\"):\n value = value.replace(\"T\", \" \")\n dtpart = value.split(\" \")\n value = dtpart[0]\n try:\n datetime.strptime(value, date_format)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def set_dtypes(df):\n # drop rows where a column names appear (happened while appending to csv)\n df = df.loc[df[df.columns[0]] != df.columns[0]]\n # convert numerics\n df = df.apply(pd.to_numeric, errors='ignore')\n # parse query_timestamp\n df.query_timestamp = 
df.query_timestamp.apply(pd.to_datetime)\n\n df.reset_index(inplace=True, drop=True)\n\n return df", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())" ]
[ "0.6067635", "0.60331184", "0.60145605", "0.5865809", "0.5851198", "0.5844736", "0.5796423", "0.5776691", "0.57648784", "0.5759746", "0.5731371", "0.5680792", "0.5634782", "0.5600407", "0.5599236", "0.55912846", "0.55862063", "0.55465883", "0.55208796", "0.5520295", "0.55130774", "0.55093515", "0.5507206", "0.5490375", "0.5485433", "0.5476382", "0.5422888", "0.54198515", "0.5413749", "0.541146" ]
0.6348015
0
Redrawing at 50 Hz causes the data to freeze. The IMU box is refreshing at 10 Hz now.
def redraw_viz(): global g_last_draw if (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)): g_last_draw = rospy.Time.now().to_sec() # redraw imu box doDraw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redraw(self):\r\n self.c.update()", "def refresh(self, _loop, data):\n try:\n if(self.model.mode == 'live'):\n self.updateGraphs()\n self.model.memory = int(self.dataClient.recv())\n self.model.cpu = float(self.dataClient.recv())\n except EOFError:\n pass\n except Exception as e:\n self.logger.error(e)\n\n self.view.refresh()\n _loop.set_alarm_in(guiRefreshTimer, self.refresh)", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def Update(self, ticks=0):", "def on_redraw_timer(self, event):\n \n if self.sampling_timer.IsRunning():\n self.daq.get_data()\n self.draw_plot()\n else:\n self.control_box.txt_info_box.SetLabel('Measurement complete')\n self.calculate()\n return", "def _DoUpdateRedraw( self, hilite = True ):\n pass", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def update(self, dt):", "def update(self, dt):", "def update(self):\n self.grid.update()\n sleep(self.update_rate)", "def realtime(self):", "def _update_display(self, loop=True):\n\n sensors_data = self.get_sensors_data()\n\n if self.current_item.entity_type is WeatherEntityType.TEMPERATURE:\n pixels = self.current_item.show_pixels(sensors_data[0])\n elif self.current_item.entity_type is WeatherEntityType.HUMIDITY:\n pixels = self.current_item.show_pixels(sensors_data[2])\n else:\n pixels = self.current_item.show_pixels(sensors_data[3])\n\n self._sense_hat.set_rotation(self.current_style.rotation)\n self._sense_hat.set_pixels(pixels)\n\n if loop:\n self._update_timer = self._start_timer(Config.UPDATE_INTERVAL, self._update_display)", "def plot_refresh():\n figure.canvas.draw()", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def update_draw(*args):\n\n # use the global 'i' variable\n global i\n\n # get the CPU usage information\n result = get_cpu_usage()\n\n # append new data to the datasets\n user.append(result[0])\n nice.append(result[1])\n sys.append( result[2])\n idle.append(result[3])\n\n # update lines data using the lists with new data\n l_user.set_data(range(len(user)), user)\n l_nice.set_data(range(len(nice)), nice)\n l_sys.set_data( range(len(sys)), sys)\n l_idle.set_data(range(len(idle)), idle)\n\n # force a redraw of the Figure\n fig.canvas.draw()\n\n # after 30 iteration, exit; else, sleep 1 second\n i += 1\n if i > 30:\n return False\n else:\n time.sleep(1)\n\n return True", "def onTimer(self, evt): \r\n # restore the clean background, saved at the beginning \r\n self.canvas.restore_region(self.bg) \r\n # update the data \r\n self.r1 = self.r1[1:] + [t1.getRatio()] \r\n self.r2 = self.r2[1:] + [t2.getRatio()]\r\n self.v1 = self.v1[1:] + [t1.getSpeed()] \r\n self.v2 = self.v2[1:] + [t2.getSpeed()] \r\n \r\n # update the plot \r\n self.pr1.set_ydata(self.r1) \r\n self.pr2.set_ydata(self.r2)\r\n self.pv1.set_ydata(self.v1) \r\n self.pv2.set_ydata(self.v2) \r\n # just draw the \"animated\" objects \r\n self.ax.draw_artist(self.pr1) \r\n self.ax.draw_artist(self.pr2)\r\n self.ax.draw_artist(self.pv1) \r\n self.ax.draw_artist(self.pv2) \r\n self.canvas.blit(self.ax.bbox)", "def 
update_io(self, dt):\n self.light.change_color(traffic_lights_binary())\n self.seven_segment_display.activate_segments(seven_segment_binary())\n self.ascii.update_ascii_grid()", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def draw(self):\r\n scalex,scaley = self.getScale()\r\n try:\r\n self.clear()\r\n # Draw Graph Background\r\n self.drawLayout()\r\n if self.app.data == None:# If no data, break\r\n return\r\n # How much each pixel represents\r\n if scalex[1]-scalex[0] == 0:\r\n return\r\n step = (scalex[1]-scalex[0])/self.w# Draw lines at pixel level resolution\r\n self.fitYScale()\r\n sens_index = [0]# If one sensor displayed in this data player\r\n if len(self.sensor_ids) == 2:# If two sensors displayed in this data player\r\n sens_index = [1,0]# Draw order blue then red to make blue line on top\r\n for s in sens_index:\r\n i = scalex[0]\r\n x = 0\r\n trackcol = self.app.getSensorCol(self.sensors[self.sensor_ids[s]])\r\n while i < scalex[1]:\r\n i += step# i Is data\r\n x += 1# x is iteration/pixel-coordinate\r\n if i<0:# Skip data for t<0\r\n continue\r\n try:\r\n # Data retrieved from xml\r\n y = float(self.app.data[int(i)][self.sensor_ids[s]].text)\r\n y2 = float(self.app.data[int(i+step)][self.sensor_ids[s]].text)\r\n # Normalize into range 0 to 1 and multiply by height\r\n y = ((y-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n y2 = ((y2-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n except IndexError:# Missing data is skipped\r\n continue\r\n self.c.create_line(x,-y+self.h,x+1,-y2+self.h,fill=trackcol,width=1)\r\n self.drawScrubber()\r\n self.drawPeekScrubber()\r\n self.c.update()\r\n except tk.TclError:# If canvas destroyed, cancel draw operation\r\n return", "def update_plot():\n pass", "def plot_refresh_handler(args):\n stream_data, runlimits, runflags = args\n if runflags.exit:\n sys.exit(1)\n\n for line_name in stream_data:\n data = stream_data[line_name]\n curr_data_len = len(data['y'])\n if curr_data_len == 0:\n # no data yet\n continue\n\n if data['last_len'] >= curr_data_len:\n # no new data since last update\n continue\n\n # save length of last line draw\n data['last_len'] = curr_data_len\n\n if FLAGS.timestamp:\n x_data = numpy.array(data['x'])\n else:\n x_data = numpy.array(range(curr_data_len))\n y_data = numpy.array(data['y'])\n\n runlimits.x_max = max(max(x_data), runlimits.x_max)\n runlimits.x_min = runlimits.x_max-FLAGS.width\n\n if FLAGS.ymin is not None:\n runlimits.y_min = FLAGS.ymin\n else:\n runlimits.y_min = min(min(y_data), runlimits.y_min)\n\n if FLAGS.ymax is not None:\n runlimits.y_max = FLAGS.ymax\n else:\n runlimits.y_max = max(max(y_data), runlimits.y_max)\n\n data['line'].set_data(x_data, y_data)\n if runflags.update_axis:\n axes = data['line'].get_axes()\n axes.relim()\n axes.set_xlim(runlimits.x_min-1, runlimits.x_max+1)\n axes.autoscale_view(scaley=True, scalex=False)\n\n manager = pylab.get_current_fig_manager()\n manager.canvas.draw()", "def update_visualizer(self):\n if self.visualizer:\n if self.frame_count == 2:\n self.visualizer.add_geometry(self.vis_points)\n self.visualizer.update_geometry(self.vis_points)\n self.visualizer.poll_events()\n self.visualizer.update_renderer()\n time.sleep(0.001)\n self.frame_count += 1", "def update(self, delta_time):\r\n #for pixels in self.pixel:\r\n for line in self.cursor:\r\n line.draw()\r\n \r\n self.check_keys()", "def update_figure(self):\n\n self.draw()", "def __call__(self, 
max_framerate=90):\n\t\tself.generalDisplayUpdate()\n\t\tGraphics.clock.tick(max_framerate)\n\t\treturn self.mainloop()", "def OnIdle(self, ):\r\n self.triggerRedraw(1)\r\n return 1", "def redraw(self):\n raise NotImplementedError()", "def on_timer(self, event):\n \n o = Unicorn()\n data = o.get_data(rt)\n k = len(data[0])\n y[:, :-k] = y[:, k:]\n y[:, -k:] = remap((data), -40, 40, -1, 1 ) \n t2 = _thread.start_new_thread(printT, ())\n #y2 = np.array([lfilter(b, a, y[i]) for i in range(17)])\n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()" ]
[ "0.6556265", "0.6498374", "0.63705957", "0.63530284", "0.63191485", "0.63020533", "0.62957853", "0.62957853", "0.62845606", "0.62845606", "0.62780565", "0.6274635", "0.62744427", "0.62476283", "0.62291384", "0.62132174", "0.6209274", "0.62041783", "0.6187864", "0.6184698", "0.61283225", "0.6114761", "0.6112002", "0.6091022", "0.60837144", "0.60546565", "0.60287714", "0.59716815", "0.59684366", "0.5961199" ]
0.7307154
0
returns True if n is a 'bouncy' number.
def bouncy(n): diffs = [int(b)-int(a) for a,b in zip(str(n)[:-1],str(n)[1:])] return sum([abs(x) for x in diffs])>abs(sum(diffs))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Happy(n, b):\r\n n = ToBase(b, n)\r\n seen = set()\r\n while n not in seen:\r\n seen.add(n) \r\n v = 0\r\n while n:\r\n d = n % 10\r\n n = n // 10\r\n v += d * d\r\n n = ToBase(b, v) \r\n if n == 1:\r\n return True\r\n return False", "def isPowerOfTwo(self, n: int) -> bool:\n if n <= 0:\n return False\n return bin(n).count('1') == 1", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def checkBase(base, n):\n current = 1\n while current < n:\n current = current * base + 1\n return current == n", "def is_po2(n) -> bool:\n return not (n & (n - 1))", "def sat(n: int):\n return pow(2, n, n) == 3", "def isgoodnum(n):\n return (not isinstance(n,bool)) and isinstance(n,(int,float))", "def mystery_2b_no_if(n: int) -> bool:\n return (n % 2 == 0 and n % 3 == 1) or (n % 2 != 0 and (n < 0 or (n > 4 and n % 3 != 1)))", "def is_hilbert_squarefree_number(n):\n ubound = math.ceil(n / 2)\n for a in range(5, ubound + 1):\n if is_hilbert_square(a) and n % a == 0:\n return False\n return True", "def isReversible(n): \n if n % 10 == 0:\n return False\n s = n + reverseNum(n)\n while s > 0:\n digit = s % 10\n if not digit in [1,3,5,7,9]:\n return False\n s //= 10\n return True", "def is_hilbert_number(n):\n return n > 0 and n % 4 == 1", "def check_mountain_number(n):\n def helper(x, is_incresing):\n if x // 10 == 0:\n return True\n if is_incresing and (x % 10) < ((x // 10) % 10):\n return helper(x // 10, is_incresing)\n return (x % 10) > ((x // 10) % 10) and helper(x // 10, False)\n return helper(n, True)", "def isHappy(self, n):\n nxt = 0\n appeared = set()\n while True:\n nxt += (n%10)*(n%10)\n n /= 10\n if n == 0:\n if nxt == 1:\n return True\n if nxt in appeared:\n return False\n\n appeared.add(nxt)\n n = nxt\n nxt = 0", "def binary_blow_wind():\n s = random.random()\n return s < 0.05", "def sat(n: int, nums=[15, 27, 102], upper_bound=5000):\n return all(n % i == 0 for i in nums) and n <= upper_bound", "def is_abundant(n):\r\n if sum_proper_divisors(n) > n:\r\n return True\r\n else:\r\n return False", "def isHappy(self, n):\n cycle = set()\n while n != 1 and n not in cycle:\n cycle.add(n)\n n = sum(int(i)**2 for i in str(n))\n return n == 1", "def sat(n: int, nums=[77410, 23223, 54187], lower_bound=2):\n return all(i % n == 0 for i in nums) and n >= lower_bound", "def verify(n):\n\n # Take the sum of all digits.\n sum_of_digits = sum(luhn_digits(n))\n\n # The number is valid iff the sum of digits modulo 10 is equal to 0\n return sum_of_digits % 10 == 0", "def is_abundant_number(x):\n return sum(proper_divisors(x)) > x", "def is_pentagonal_number(n):\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()", "def main():\n number = 99\n bouncy_n = 0\n while True:\n number += 1\n if IsBouncy(number):\n bouncy_n += 1\n proportion = (bouncy_n / number)\n if proportion == 0.99:\n print(f'The least number when the proportion of bouncy numbers is 99% is {number:,}')\n break", "def perfectd(n: int) -> bool:\n if sum(divisors(n)) - n == n:\n return True\n else:\n return False", "def _isvalid(self, x):\n return (x <= self.n) & (x > 0)", "def isHappy(n):\n def check_num(n):\n res = 0\n while n > 0:\n n, digits = divmod(n, 10)\n res += digits ** 2\n return res\n \n \n seen = set()\n while n != 1 and n not in seen:\n seen.add(n)\n n = check_num(n)\n \n return n == 1", "def is_pent(n):\n pen_test = (1 + sqrt(24*n + 1))/6\n if pen_test == int(pen_test):\n return True\n return False", "def is_circular_prime(n):\r\n\r\n # pdb.set_trace()\r\n s 
= str(n)\r\n for i in xrange(len(s)):\r\n if not is_prime(n):\r\n return False\r\n s = s[1:] + s[0]\r\n n = int(s)\r\n\r\n return True", "def CBool(num):\n n = float(num)\n if n:\n return 1\n else:\n return 0", "def is_simple_number(x: int):\n divisor = 2\n while divisor < x:\n if x % divisor == 0:\n return False\n divisor += 1\n return True", "def buyable(n: int) -> bool:\n if n in [4, 6, 25]:\n return True\n elif n < 4:\n return False\n else:\n buyability = False\n for size in [4, 6, 25]:\n buyability |= buyable(n - size)\n return buyability" ]
[ "0.6266255", "0.5925547", "0.5884278", "0.58818424", "0.5877583", "0.58409125", "0.5840892", "0.58336264", "0.5791553", "0.5779341", "0.57189405", "0.5716756", "0.56896675", "0.56802714", "0.56628543", "0.56577027", "0.5654589", "0.5652041", "0.565071", "0.5629876", "0.56120986", "0.5593728", "0.55885416", "0.5576035", "0.5566554", "0.5555344", "0.5535059", "0.551029", "0.5491235", "0.5486338" ]
0.71531487
0
Gets the AI's move from the current configuration.
def get_ai_move(board): return Connect4MiniMax.get_move(board)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_move(self, game_state: BotGameState) -> BotMove:\n return", "def get_move(self):\n if self._difficulty == 0:\n return self._get_easy_move()\n else:\n # Different stategies/difficulties can be attached here\n return", "def get_move(self, game):\n return", "def _get_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_move when the game has not started!\")\n if isinstance(self.current_turn, Player):\n return self._get_player_move()\n elif isinstance(self.current_turn, Enemy):\n return self._get_enemy_move()\n else:\n raise TypeError(\"You're trying to move something that isn't a character or an adversary.\")", "def best_move(self):\n if self._move is not None:\n return self._move\n else:\n return self.pass_move", "def getMove(self, board):\n pass", "def move(self):\r\n move = None\r\n if self.last_move is None:\r\n move = rockyman.move(self)\r\n else:\r\n index = the_moves.index(self.last_move) + 1\r\n if index >= len(the_moves):\r\n index = 0\r\n move = the_moves[index]\r\n self.last_move = move\r\n return move", "def get_move(moves):\n pass", "def movee(self):\n\n #return the initial state if he cant move and he's in the initial state\n if not self.move and self.index == 0:\n return self.path[self.index]\n\n #return the goal state if he's at the goal state\n if self.index == len(self.path):\n return self.path[-1]\n\n #return the next move and increments the index attribute\n nextMove = self.path[self.index]\n self.index += 1\n\n return nextMove", "def getMove(self, board):\r\n moves = self._getAvailableActions(board)\r\n return moves[-1]", "def get_ai_move(board, player):\n row, col = 0, 0\n return row, col", "def get_ai_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def move(self):\r\n if self.last_op_move is None:\r\n return rockyman.move(self)\r\n else:\r\n return self.last_op_move", "def move(self):\n if self.learn is None:\n return random.choice(moves)\n else:\n return self.storedmove", "def next_move(self):\n return self.decoded_population[self.current_index]", "def get_next_move(self):\n return int(input('Enter your move: '))", "def next_move(self, board):\n \n return self.best_move(self.score_columns(board))", "def getMove(self, board):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def _get_player_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_player_move when the game has not started!\")\n current_player = next(player for player in self.player_list if player.name == self.current_turn.name)\n if current_player is None:\n raise RuntimeError(\"Attempted to get player move from a player who does not exist!\")\n return current_player.move()", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n\n if not legal_moves:\n return (-1, -1)\n\n move = None\n try:\n algorithm_name = getattr(self, self.method) # Method selected to get move (minimax, alphabeta)\n if self.iterative:\n depth = 1 # Depth used for iterative deepening\n while True:\n _, move = algorithm_name(game, depth)\n depth += 1\n else:\n _, move = algorithm_name(game, self.search_depth)\n except Timeout:\n return move # A timeout has occurred, return best move so far\n return move", "def get_move(self, i):\n # Exception if not (0 <= i < self.length)\n return self._moves[i]", "def get_next_move(self, game_state):\n next_move = None\n encoded_game_state = self.__encode_state(game_state)\n\n self.__init_q_values(game_state)\n\n if random.random() < self.epsilon:\n next_move = 
self.__get_next_random_move(game_state)\n self.__update_epsilon()\n else:\n next_move = self.__get_next_greedy_move(game_state)\n\n self.game_moves_history.append((encoded_game_state, next_move))\n\n return next_move", "def _get_enemy_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_enemy_move when the game has not started!\")\n current_enemy = next(enemy for enemy in self.enemy_list if enemy == self.current_turn)\n if current_enemy is None:\n raise RuntimeError(\"Attempted to get a move from a nonexistent enemy!\")\n return current_enemy.move()", "def get_optimal_move(self):\n # create the root state\n root = State(self.current_board, True, self.__machine_token, self.__human_token)\n # alpha-beta-pruning algorithm\n best_move = max_value_a_b(root, depth(root), -1000, 1000)\n # obtain the direct children.\n direct_children = get_direct_children(root, all_states_generated)\n # obtain the coordinates of the movement.\n for direct_child in direct_children:\n if direct_child.value == best_move:\n return get_coordinates(root, direct_child)", "def choose_move(self):\r\n \r\n return None", "def get_move(self, board, possible_moves):\n next_move = None\n max_score = -float('Inf')\n self.start_time = datetime.now()\n for depth in range(2,3): # iterative deepening\n try:\n for move in possible_moves:\n board_copy = deepcopy(board)\n self.man.play_move(board_copy, move, self.color)\n score = self.minimaxm(depth, board, False)\n if score > max_score:\n max_score = score\n next_move = move\n\n except TimeoutError:\n print(\"ran out of time\")\n break\n return next_move", "def choose_absolute_move(self):\n move = self.choose_move()\n if self.player_name == 'A':\n return move\n # Player B, revert the IDs\n return (move + 6) % 12", "def getMove(self, board):\r\n self.thisNumTurns += 1\r\n moves = self._getAvailableActions(board)\r\n return moves[random.randint(len(moves))]", "def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))", "def get_move(self, game, time_left):\n self.time_left = time_left\n return self.minimax(game, self.search_depth)" ]
[ "0.75694376", "0.7480863", "0.7418201", "0.7150604", "0.68652487", "0.6862122", "0.68234956", "0.67731434", "0.6764384", "0.67577225", "0.6746614", "0.66947275", "0.6655632", "0.6652874", "0.6640375", "0.6605584", "0.66051507", "0.6581102", "0.6555464", "0.6554516", "0.6552523", "0.6490748", "0.64867103", "0.6480396", "0.6439309", "0.643484", "0.6433504", "0.6429952", "0.6409958", "0.6400323" ]
0.78304815
0
Returns message info. With v2 messaging, this method does not work.
def get_message_info(self, msgid=None): raise NotImplementedError('This method is not supported ' 'with v2 messaging') if msgid: return self.sms_client.get_message(msgid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Get_Message_Info(service, userId, message_id):\n message_info = service.users().messages().get(userId=userId, id=message_id).execute()\n\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name']=='Message-ID':\n message_id=header['value']\n if header['name']=='From':\n sender=header['value']\n if header['name']=='Subject':\n subject=header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n\n info = (sender, subject, thread_id, message_id, attachment_list, ID)\n return info", "def get_message(self):\n return self.msg", "def msg_info_dict(self):\n return self._msg_info_dict", "def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json", "def get_message(self):\n return self.message", "def get_message(self):\n return self.message", "def _get_message(self):\n return self.__message", "def get_message(self, resp):\n return resp['message']", "def get_message(self):\n return self.__mess", "def getMessage(self):\n return self.message", "def getMessage(self):\n return self.message", "def get_message (self) :\n return self._message", "def msg(self):\n if \"msg\" in self._json:\n return self._json[\"msg\"]\n elif \"detail\" in self._json:\n return self._json[\"detail\"]\n else:\n return self._json", "def getMessage():\n return message", "def getInfo(uid):\n\n if TESTING:\n result, data = mailBox.uid('FETCH', uid, '(BODY.PEEK[HEADER])')\n else:\n result, data = mailBox.uid('FETCH', uid, 'RFC822')\n\n if result != \"OK\":\n print \"\\nERROR: Could not fetch message\"\n return\n\n raw_email = data[0][1]\n email_message = email.message_from_string(raw_email)\n result, data = mailBox.uid('FETCH', uid, 'RFC822.SIZE')\n size = re.search('SIZE (\\d+)', uid).group(1)\n\n print \"\\nInfo about message:\"\n print \"-------------------------------------\"\n print \"From:\", email_message['From']\n print \"To:\", email_message['To']\n print \"Cc:\", email_message['Cc']\n print \"Bcc:\", email_message['Bcc']\n print \"Subject:\", email_message['Subject']\n print \"Date:\", email_message['Date']\n print \"Message Size:\", size", "def get_message(self):\n data = self.socket.recv(1024)\n if not data:\n logging.error('Failed to read data from socket')\n return\n\n return self.decode_message(data)", "def get_message(request):\r\n message_key = request.GET.get('message', None)\r\n message = None\r\n message_type = None\r\n\r\n if ((not message_key or message_key == 'upload_success') and\r\n QIFParser.get_status() == 'in_progress'):\r\n message_key = 'in_progress_quicken_file'\r\n\r\n try:\r\n message = Message.MESSAGES[message_key]\r\n message_type = message['type']\r\n message = message['message']\r\n except KeyError:\r\n pass\r\n\r\n return {'message': message,\r\n 'message_key': message_key,\r\n 'message_type': message_type}", "def message(self) -> str:\n return self.fields.get('msg', self.raw_string)", "def message(self):\n return self._message", "def get_incoming_message(self):\n\n if self.incoming_message['is_used']:\n return {}\n\n self.incoming_message['is_used'] = True\n return self.incoming_message['message']", "def get_message(self, message_id):\n req_data = [ str(message_id) ]\n return self.request(\"find:Message.stats, Message.content\", req_data)", "def 
msg(self):\n\t\treturn self.message", "async def get_msg(self):\n try:\n # 2^8 bytes at a time. I just like it, no special reason\n data = await self.reader.read(256)\n msg = data.decode()\n addr = writer.get_extra_info(\"peername\")\n logging.info(\"Received %s from %s\", (msg, addr))\n\n except Exception as e:\n logging.error(\"Command could not be decoded; %s\", e)\n\n return msg", "def get_message():\n\tincoming_message = conn.recv(1024)\n\tincoming_message = incoming_message.decode()\n\treturn incoming_message", "def _get_plain_message (self) :\n return self._message", "def message(self) -> str:\n return pulumi.get(self, \"message\")", "def message(self) -> str:\n return pulumi.get(self, \"message\")", "def message(self) -> str:\n return pulumi.get(self, \"message\")", "def message(self) -> str:\n return pulumi.get(self, \"message\")", "def message(self) -> str:\n return pulumi.get(self, \"message\")" ]
[ "0.7477835", "0.7271753", "0.72640973", "0.7257839", "0.71884465", "0.71884465", "0.71538955", "0.71054775", "0.7103566", "0.7045384", "0.7045384", "0.70283306", "0.686375", "0.6844654", "0.6824819", "0.6769432", "0.6716818", "0.66525507", "0.66496587", "0.66455567", "0.6624531", "0.6617425", "0.65756744", "0.6569878", "0.65235347", "0.6520697", "0.6520697", "0.6520697", "0.6520697", "0.6520697" ]
0.8126016
0
Buy a phone number 'phone_number' from Bandwidth.
def buy_phone_number(self, phone_number=None, area_code=None, user_id=None, site_id=None, country_code='US'): if country_code not in ('US', 'CA'): logging.info('Only numbers in US or CA are supported, requested ' 'country: {}'.format(country_code)) site_id = site_id if site_id else settings.BW_SITE_ID if phone_number: if validatePhoneNumber(phone_number, False) is False: raise ValueError("Invalid phone number passed- unable to buy") # a specific number ought to be ordered logging.info('buy_phone_number(): buying requested number: {}.'. format(phone_number, site_id)) try: newNumber = self.account_client.order_phone_number( number=self._parse_number_to_bw_format(phone_number), name='SendHub Customer: {}'.format(user_id), quantity=1, siteid=site_id ) except BandwidthOrderPendingException as order_id: logging.warn('Order {} is pending for phone number: {}, ' 'user: {}, looks like bandwidth service is ' 'slow. Error out for now and nightly cleanup ' 'task will release the number.'. format(order_id, phone_number, user_id)) raise BWNumberUnavailableError( 'Pending Number Order: ' + SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) except BandwidthAccountAPIException as e: # If we didn't get the number, throw an error err_resp = u'We could not get number {} from our carrier. ' \ u'Carrier Message: {}.'.format(phone_number, e) logging.error(err_resp) raise BWNumberUnavailableError(err_resp) # we bought the number successfully return self._cleanup_and_return_numbers(newNumber, quantity=1) else: if area_code is None: return False try: ordered_number = self.account_client.search_and_order_local_numbers( # noqa area_code=area_code, quantity=1, name='SendHub Customer: {}'.format(user_id), siteid=site_id ) except BandwidthOrderPendingException as order_id: logging.warn('Order {} is pending for a number in ' 'area code: {}, user_id: {}, qty: 1, ' 'looks like bandwidth service is slow. ' 'Error out for now and nightly cleanup task ' 'will release the number.'. format(order_id, area_code, user_id)) raise AreaCodeUnavailableError( 'Pending Area Code Order: ' + SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) except BandwidthAccountAPIException as e: # If we didn't get the number, throw an error logging.error(u'buy_phone_number(): could not get number. ' u'Throwing an error - {}.'.format(e)) raise AreaCodeUnavailableError( SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) return self._cleanup_and_return_numbers(ordered_number, quantity=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, phone_number):\n #response = self.client.publish(PhoneNumber=phone_number, Message=self.message)\n return True", "def phonecall():\n phone_number = choice(phone_numbers)\n r = twiml.Response()\n r.dial(phone_number)\n return str(r)", "def dial(phone_number):\n call = client.calls.create(\n to='+{}'.format(phone_number),\n from_=twilio_phone_number,\n url=twiml_instructions_url,\n )\n print(call.sid)\n return \"dialing +{}. call SID is: {}\".format(phone_number, call.sid)", "def make_phone_call(self):\n client = Client(account_sid, auth_token)\n\n call = client.calls.create(\n url='http://demo.twilio.com/docs/classic.mp3',\n to=self.emergency_number,\n from_='+16505499680'\n )\n\n print(call.sid)", "def release_phone_number(self, number):\n number = str(number)\n if validatePhoneNumber(number, False) is False:\n raise ValueError(\"Invalid phone number {} - unable to release\".\n format(number))\n\n nat_number = self._parse_number_to_bw_format(str(number), 'US')\n try:\n self.account_client.delete_phone_number(nat_number)\n except BandwidthAccountAPIException as e:\n logging.info(\"Error Deleting phone# {}, Exception: {}\".\n format(number, e))\n raise", "def type_phone(self, phone):\n\n\t\twith allure.step(\"Type payee phone\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.PHONE_INPUT)\n\t\t\telement.write(phone)\n\t\t\treturn None", "def place_call(self, number):\n call_params = urllib.urlencode({\n 'outgoingNumber' : number,\n 'forwardingNumber' : self.forwarding_number,\n 'subscriberNumber' : 'undefined',\n 'remember' : '0',\n 'phoneType' : self.phone_type,\n '_rnr_se': self.key\n })\n\n # Send the text, display status message \n self.response = self.opener.open(self.call_url, call_params).read()", "def send_mobile_money(self, transaction_id, phone_number: str, amount: float, channel: str):\n if phone_number.startswith(\"+\"):\n phone_number = phone_number[1:]\n response = send_request(\n data=self.get_mobile_parameters(\n vendor_id=self.vendor_id,\n transaction_id=transaction_id,\n amount=amount,\n phone_number=phone_number\n ),\n url=self.get_mobile_url(channel)\n )\n return response", "def _purchase(unpurchased_number):\r\n # Buying is literally AvailablePhoneNumber.purchase().\r\n # There is apparently a way to call the API to purchase directly based on an\r\n # by area code. 
I cannot find it.\r\n # purchase() returns False if the new_purchase fails, a PhoneNumber on success.\r\n try:\r\n new_purchase = unpurchased_number.purchase()\r\n except:\r\n raise\r\n if isinstance(new_purchase, PhoneNumber):\r\n return new_purchase # This returns a Twilio.PhoneNumber object\r\n if new_purchase == False: # we actually do want to make sure it is == False...\r\n raise PhoneNumberPurchaseException(\"Phone number purchase failed.\")\r\n raise Exception(\"Unknown error: new_purchase was neither bool nor PhoneNumber\")", "def send_sms_code(user_phone):\n client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)\n code = str(random.randint(0, 9999)).rjust(4, '0')\n\n try:\n message = client.messages.create(\n to=str(user_phone),\n from_=settings.TWILIO_NUMBER,\n body=f\"Your FoodBase verification code is: {code}\"\n )\n except Exception as e:\n print(e)\n return None\n else:\n return code", "def send_to_airtelmoney(self, transaction_id, phone_number, amount: float, ):\n return self.send_mobile_money(transaction_id, phone_number, amount, channel=\"airtelmoney\")", "def phone(self, new_number):\n self._phone.number = new_number", "def create_phone_number(n):", "def send_to_mpesa(self, transaction_id, phone_number, amount: float, ):\n return self.send_mobile_money(transaction_id, phone_number, amount, channel=\"mpesa\")", "async def get_phone(self):\n\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='get')\n return e", "async def change_phone(self, code: int, prefix: int, phone: int, password: str):\n data = {\n \"countryCode\": code,\n \"prefix\": prefix,\n \"phone\": phone,\n \"password\": password\n }\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='post', data=data)\n return e", "def create_phone_signup_code(self, phone, password):\r\n code = self.random_code(settings.CODE_LENGTH)\r\n # if phone in [\"+77753721232\", \"+77752470125\", \"+77074443333\", \"+77076799939\"]:\r\n # code = \"4512\"\r\n # else:\r\n # code = \"%0.4d\" % random.randint(0, 9999)\r\n\r\n # mobizonproxy.send_sms(phone, text=u\"{} - Код активации для Pillowz365\".format(code))\r\n activation = Activation(phone=phone,\r\n to_reset=False,\r\n password=make_password(password),\r\n code=code)\r\n activation.save()\r\n return activation", "def fp_from_phone(daily_phone_use):\n phone = kg_to_tonnes(daily_phone_use*1250)\n return phone", "def send_otp_to_primary_mobile(otp, mobile):\n print('Sending otp to mobile: ', otp, mobile)", "def number(self, new_phone):\n returned_num = self.get_valid_num(new_phone)\n if returned_num is None:\n raise ValueError\n self._phone = returned_num", "def purchase_number_in_same_area_code(phone_number):\r\n unpurchased_numbers = search_by_area_code(phone_number)\r\n for i in range(4):\r\n first_number = unpurchased_numbers[i]\r\n try:\r\n pn = _purchase(first_number)\r\n return pn\r\n except:\r\n continue", "def get_next_avail_number(self):\n area_code = int(self.conf['twilio_area_code'])\n numbers = [vbts_util.strip_number(str(n.phone_number)) for n in self.twilio_client.phone_numbers.list()]\n avail_numbers = vbts_util.get_next_avail_number(numbers)\n if (len(avail_numbers) == 0):\n # if we have none, get a new number\n new_numbers = self.twilio_client.phone_numbers.search(area_code=area_code)\n if not new_numbers:\n raise ValueError(\"No numbers available in area code %d\" % area_code)\n num = new_numbers[0]\n if not num.purchase(): # this does the buy!\n raise 
ValueError(\"Purchasing number failed\")\n # setup new number\n bought_num = self.twilio_client.phone_numbers.list(phone_number=num.phone_number)[0]\n bought_num.update(sms_url=self.conf['twilio_sms_url'], voice_url=self.conf['twilio_voice_url'], sms_fallback_url=self.conf['twilio_sms_fallback_url'], voice_method=self.conf['twilio_voice_method'], sms_method=self.conf['twilio_sms_method'], sms_fallback_method=self.conf['twilio_sms_fallback_method'])\n avail_numbers.append(vbts_util.strip_number(str(bought_num.phone_number)))\n\n return random.choice(avail_numbers)", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def send_to_elipa(self, transaction_id, phone_number, amount: float, ):\n return self.send_mobile_money(transaction_id, phone_number, amount, channel=\"elipa\")", "def _send_code(self, phone, code, case):\n raise NotImplementedError", "def telephone(self, telephone: str):\n\n self._telephone = telephone" ]
[ "0.60898256", "0.60582405", "0.6051987", "0.6038466", "0.59917986", "0.5905722", "0.5898733", "0.5886916", "0.5838351", "0.58346975", "0.58340526", "0.58339083", "0.58324045", "0.5787865", "0.574295", "0.5731969", "0.57210875", "0.56918734", "0.56572384", "0.5640075", "0.55992746", "0.5592766", "0.55788034", "0.55788034", "0.55788034", "0.55788034", "0.55788034", "0.55736405", "0.5545985", "0.5519357" ]
0.6985683
0
Find a number within an area code.
def find_number_in_area_code(self, area_code, quantity=1, country_code='US'): if country_code not in ('US', 'CA'): logging.info('Only numbers in US/CA are supported, requested ' 'country: {}'.format(country_code)) if quantity < 1: raise ValueError('Quantity can not be < 1 - passed: {}'. format(quantity)) try: numbers = self.account_client.search_available_local_numbers( area_code=area_code, quantity=quantity ) except BandwidthAccountAPIException as e: logging.info('Failed to search for phone number in given area ' 'code - error: {}'.format(e)) raise AreaCodeUnavailableError( SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) else: if not numbers: raise AreaCodeUnavailableError( SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) return self._cleanup_and_return_numbers(numbers, quantity)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area_code(self):\n return self.number[:3]", "def main():\n print(search(range(1, 21, 2), 9)) # will print 4\n print(search(range(1, 21, 2), 0)) # will print -1", "def is_in(m):\n\tf=open('places.dat','r')\n\tr = f.read()\n\tf.close()\n\tif str(m) in r:\n\t\tj = r.find(m)/7\n\t\treturn j\n\telse:\n\t\treturn -1", "def search_by_area_code(phone_number):\r\n area_code_match = padded_area_code(phone_number)\r\n return _handle_phone_search(TWILIO_CLIENT.phone_numbers.search(contains=area_code_match))", "def find(self, area: Area) -> Area:\n pass", "def identify_habarea(indiv_xy_position, habarea_map): \n \n row=int(indiv_xy_position[0])\n col=int(indiv_xy_position[1])\n habarea=habarea_map[row][col]\n \n return habarea", "def find(self, start: ghidra.program.model.address.Address, value: int) -> ghidra.program.model.address.Address:\n ...", "def locate_number(\n pipeline: Pipeline,\n num: int,\n img: ImageBGR,\n) -> Optional[Tuple[int, int]]:\n\n box = locate_front_facing_text(pipeline, str(num), img)\n\n if box is None:\n return None\n\n (cx, cy) = center_of_box(box)\n\n return (round(cx), round(cy))", "def find_ordinate(x_value, a_value, b_value, field):\n\n # y_value may be found by simple substitution of x_value in the equation\n return (int(pow(x_value, 3, field)) + a_value * x_value + b_value) % field", "def point_in_zip(x):\r\n point = Point(x[0],x[1])\r\n y = None\r\n for ind,val in enumerate(gpd_zip['geometry']):\r\n if point.within(val):\r\n y = int(gpd_zip['ZIPCODE'].iloc[ind])\r\n return y", "def check_range(num):\n for i in ranges.keys():\n if num in ranges[i]:\n return i", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def grab_area_code(phone_number):\r\n #number of form +1 XXX XXX XXXX (this should be the form get_twilio_client provides)\r\n if \"+1\" == phone_number[:2]:\r\n return phone_number[2:5]\r\n # number of form 1 XXX XXX XXXX\r\n if len(phone_number) == 11 and phone_number[0] == '1':\r\n return phone_number[1:4]\r\n # number of form XXX XXX XXXX\r\n if len(phone_number) == 10:\r\n return phone_number[:3]\r\n raise BadPhoneNumberError('\"%s\" is an invalid phone number.' 
% phone_number)", "def find_floor(x):\n pat = r\"floor\\s*(\\d*)\"\n result = re.search(pat, str(x), flags=re.IGNORECASE)\n if result:\n return int(result.group(1))", "def get_single_location(chrom, pos):\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos", "def getWeatherIndex(code, return_if_none=Constants.return_value_index_of_weather_not_found):\n # Start the index with 0\n index = 0\n for i in [100, 200, 300, 400]:\n for j in [0, 33, 66]:\n if inWeatherCodeRange(code, i + j, i + j + 33):\n return index\n index += 1\n return return_if_none", "def purchase_number_in_same_area_code(phone_number):\r\n unpurchased_numbers = search_by_area_code(phone_number)\r\n for i in range(4):\r\n first_number = unpurchased_numbers[i]\r\n try:\r\n pn = _purchase(first_number)\r\n return pn\r\n except:\r\n continue", "def find_number(self, string):\n #string = string.encode('ascii', 'ignore')\n #return int(filter(str.isdigit, string))\n s = (re.findall('\\d+', string))\n return int(''.join(s))", "def find(number, A):\n\tfor x in A:\n\t\tif number == x:\n\t\t\treturn True\n\t\treturn False", "def getInt(self, address: ghidra.program.model.address.Address) -> int:\n ...", "def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit", "def find(self, sub) -> int:\n pass", "def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1", "def inside(self, areas):\n\n poly_orig = geometry.Polygon(self.area_poly)\n poly_origb = affinity.scale(poly_orig, xfact=1.1, yfact=1.1)\n idf = shapely.vectorized.contains(\n poly_origb, areas['RA'], areas['Dec'])\n\n return areas[idf]", "def _get_zone_number(longitude, latitude):\n\n if 56 <= latitude < 64 and 3 <= longitude < 12:\n return 32\n\n if 72 <= latitude <= 84 and longitude >= 0:\n if longitude <= 9:\n return 31\n elif longitude <= 21:\n return 33\n elif longitude <= 33:\n return 35\n elif longitude <= 42:\n return 37\n\n return int((longitude + 180) / 6) + 1", "def lookup(addr, num, street, city, code, geo_dict, failure_set):\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.load(req.urlopen(address_url).decode('utf-8'))['result']\n except Exception:\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.loads(req.urlopen(address_url).read().decode('utf-8'))['result']\n except Exception as e:\n print(e, addr)\n failure_set.add(addr)\n return None\n if len(geo_data['addressMatches']) == 0:\n print(addr, ': Failure')\n failure_set.add(addr)\n return None\n print(addr, ': Success')\n location = geo_data['addressMatches'][0]['coordinates']\n latlong = ','.join([str(location['y']), str(location['x'])])\n geo_dict[addr] = latlong\n return tuple(float(geo) for geo in latlong.split(','))", "def determine_us_state(area_code):\r\n if not isinstance(area_code, str):\r\n area_code = str(area_code)\r\n if area_code in area_code_mapping:\r\n return area_code_mapping[area_code][0]", "def find(self, number: str) -> Optional[str]:\n if number in self.data: # noqa\n return number\n else:\n return None", "def find_zip_code(x):\n i = 0\n 
j = 4\n for i in range(1,len(x)-6):\n string = x[i-1:i+6]\n cond = (string[1:-1].isnumeric(), not string[0].isnumeric(), not string[-1].isnumeric())\n if all(cond):\n return x[i:i+5]", "def get_location_codes(scanner, input):\n matches = scanner.search_places(input)\n codes = []\n for i in matches[\"Places\"]:\n codes.append(i[\"PlaceId\"])\n return codes" ]
[ "0.6367211", "0.6041845", "0.5946915", "0.59178984", "0.5855319", "0.57144153", "0.56851524", "0.56310326", "0.55546224", "0.55464876", "0.55371064", "0.55183727", "0.55147284", "0.53723645", "0.53643304", "0.5352899", "0.5344583", "0.53374916", "0.5331702", "0.5317512", "0.53158146", "0.53114295", "0.52758944", "0.5272143", "0.52572584", "0.52522373", "0.52153057", "0.51940775", "0.51926327", "0.51880324" ]
0.66250336
0
procures a toll-free number.
def buy_toll_free_number(self, quantity=1, pattern=None, site_id=None, user_id=None): if quantity < 1: raise ValueError('Quantity can not be < 1 - passed: {}'. format(quantity)) site_id = site_id if site_id else settings.BW_SITE_ID try: toll_free_numbers = self.account_client.search_and_order_toll_free_numbers( # noqa quantity=quantity, pattern=pattern, siteid=site_id, name='SendHub Customer: {}'.format(user_id), ) except BandwidthOrderPendingException as order_id: logging.warn('Order {} is pending for a toll-free number for ' 'user: {}. Looks like bandwidth service is slow. ' 'Error out for now and nightly cleanup task ' 'will release the number.'. format(order_id, user_id)) raise BWTollFreeUnavailableError( 'Toll Free Number Order Pending: ' + SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) except BandwidthAccountAPIException as e: # If we didn't get the number, throw an error logging.error(u'buy_tollfree_phone_number(): could not get ' u'toll free number. ' u'Throwing an error - {}.'.format(e)) raise BWTollFreeUnavailableError( SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) else: if not toll_free_numbers: raise BWTollFreeUnavailableError( SHBandwidthClient.NUMBER_UNAVAILABLE_MSG ) return self._cleanup_and_return_numbers(toll_free_numbers, quantity)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def r_free(self, t):\n term1 = (self.snr.E_SN ** 3 * self.pulsar.L_0 ** 2) / (self.snr.m ** 5)\n term2 = (t * YEAR_TO_SEC) ** (6. / 5)\n return CM_TO_PC * 1.44 * term1 ** (1. / 10) * term2", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def use(self):\n if self.price_of_trip == 0:\n print(\"Sorry your card has been used\")\n else:\n self.price_of_trip -= self.price_of_trip\n print(\"Done\")", "def toll_free(self, country_code):\r\n return AvailablePhoneNumbersTollFree(self, country_code)", "def release(self, number: int) -> None:\n if number not in self.numbers_set:\n self.numbers_q.append(number)\n self.numbers_set.add(number)", "def breakFree(self, task):\n\n # Bot Break Free\n if taskMgr.hasTaskNamed(\"Player_\" + str(self.id) + \"_AI\"):\n taskMgr.doMethodLater(self.breakFreeCt * .25, self.heldBy.actions.drop, \"Player_\" + str(self.id) + \"_AIBreakFree\", extraArgs=[1.0])\n\n # Player break free\n else:\n moveKeys = [[1,0], [-1,0]]\n\n # To break free player must press the direction keys in a sequence...\n if self.kbVal == moveKeys[self.breakFreeCt % 2]:\n self.breakFreeCt -= 1\n if self.breakFreeCt <= 0:\n self.heldBy.actions.drop(1.0)\n return task.done\n\n if taskMgr.getTasksMatching(\"Player_\" + str(self.id) + \"_MoveLoop\"):\n return task.done\n \n return task.cont", "def release(self, number: int) -> None:\n self.nums.add(number)", "def tickets(number, day, premium_seating):\n #fill in your code here. \n return 0.0", "def get_next_avail_number(self):\n area_code = int(self.conf['twilio_area_code'])\n numbers = [vbts_util.strip_number(str(n.phone_number)) for n in self.twilio_client.phone_numbers.list()]\n avail_numbers = vbts_util.get_next_avail_number(numbers)\n if (len(avail_numbers) == 0):\n # if we have none, get a new number\n new_numbers = self.twilio_client.phone_numbers.search(area_code=area_code)\n if not new_numbers:\n raise ValueError(\"No numbers available in area code %d\" % area_code)\n num = new_numbers[0]\n if not num.purchase(): # this does the buy!\n raise ValueError(\"Purchasing number failed\")\n # setup new number\n bought_num = self.twilio_client.phone_numbers.list(phone_number=num.phone_number)[0]\n bought_num.update(sms_url=self.conf['twilio_sms_url'], voice_url=self.conf['twilio_voice_url'], sms_fallback_url=self.conf['twilio_sms_fallback_url'], voice_method=self.conf['twilio_voice_method'], sms_method=self.conf['twilio_sms_method'], sms_fallback_method=self.conf['twilio_sms_fallback_method'])\n avail_numbers.append(vbts_util.strip_number(str(bought_num.phone_number)))\n\n return random.choice(avail_numbers)", "def restock(self):\n self.money = 9999", "def set_free_variable(self, t):\n self.free_variable = t\n self.solver.set_free_variable(t)", "def release(self, number):\n self.numbers.add(number)", "def search_available_toll_free_number(self, pattern=None, quantity=1):\n if quantity < 1:\n raise ValueError('Quantity can not be < 1 - passed: {}'.\n format(quantity))\n\n try:\n pattern = pattern if pattern else '8**'\n toll_free_numbers = self.account_client.search_available_toll_free_numbers( # noqa\n quantity=quantity,\n pattern=pattern\n )\n\n except BandwidthAccountAPIException as e:\n # If we didn't get the number, throw an error\n logging.error(u'search_tollfree(): could not get toll '\n u'free number. 
'\n u'Throwing an error - {}.'.format(e))\n raise BWTollFreeUnavailableError(\n SHBandwidthClient.NUMBER_UNAVAILABLE_MSG\n )\n\n else:\n if not toll_free_numbers:\n raise BWTollFreeUnavailableError(\n SHBandwidthClient.NUMBER_UNAVAILABLE_MSG\n )\n return self._cleanup_and_return_numbers(toll_free_numbers,\n quantity)", "def _lend(self, \n\t\t\t borrower, \n\t\t\t asked_value):\n\t\tif self.strategy == 1:\n\t\t\tloan_value = min(self.stock, asked_value)\n\t\t\tself.stock -= loan_value\n\t\t\tdebt_link = DebtLink(self, borrower, loan_value * (1.0 + self.interest_rate))\n\t\t\tself.loans.append(debt_link)\n\t\t\tborrower.debt_link = debt_link\n\t\t\treturn loan_value\n\t\telse: return 0.0", "def charge(self):\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tpayment_value = loan.borrower._pay(loan.value)\n\t\t\tloan.value -= payment_value\n\t\t\tif loan.value <= 0.0:\n\t\t\t\tloan.borrower.debt_link = None\n\t\t\t\tdel self.loans[l]\n\t\t\tself.stock += payment_value", "def take(self, desired_amount):\n if self.amount >= desired_amount:\n grab = desired_amount\n else:\n grab = min(desired_amount, self.amount)\n self.amount -= grab\n print(f\"{self} {self.amount} of supplies left\")\n return grab", "def setNumPurchased(self, val):\n self.numberPurchased = val", "def free_flight_time(self, free_flight_time):\n\n self._free_flight_time = free_flight_time", "def use(self):\n if self.flag:\n if self.credit < self.price_of_trip:\n return \"Your credit is not enough, please increase your credit\"\n else:\n self.credit -= self.price_of_trip\n return \"Done\"\n else:\n return \"Sorry, your card has expired.\"", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def add_and_allocate_number(self, cb: t.Callable[[EPOLL], None]) -> int:\n number = self.next_number\n # TODO technically we should allocate the lowest unused number\n self.next_number += 1\n self.number_to_cb[number] = cb\n return number", "def charge(self,price):\n\n if price + self._balance> self._limit:\n return False\n else:\n self._balance+=price\n return True", "def add_number(self):\n # take one of the free positions in the grid at random\n x, y = random.choice(self.free_positions)\n # with the probability of Game.proba_four, put a 4 in the box. 
Else\n # put a 2\n if random.random() < Game.proba_four:\n self.grid[x][y] = 4\n else:\n self.grid[x][y] = 2", "def free(amounts: Dict[str, int]) -> None:\n for name, amount in amounts.items():\n assert 0 <= amount <= Resources.total[name] - Resources.available[name]\n Resources.available[name] += amount", "def _borrow(self):\n\t\tif self.debt_link == None:\n\t\t\tchoice_list = [a for s in self.site.neighbors for a in s.agents_in_site if a.stock >= (self.consumption_demanded - self.consumed)]\n\t\t\tif len(choice_list) > 0: \n\t\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\t\tloan_value = choosed._lend(self, self.consumption_demanded - self.consumed)\n\t\t\t\tself.consumed += loan_value\n\t\t\t\tself.consumption_deficit -= loan_value", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def task_saleorder_update_productskustats_waitingpay_num(sku_id):\n from flashsale.pay.models import SaleOrder\n\n product_id = ProductSku.objects.get(id=sku_id).product.id\n waitingpay_num_res = SaleOrder.objects.filter(item_id=product_id, sku_id=sku_id,\n status=SaleOrder.WAIT_BUYER_PAY).aggregate(\n Sum('num'))\n total = waitingpay_num_res['num__sum'] or 0\n stat = SkuStock.get_by_sku(sku_id)\n if stat.waitingpay_num != total:\n stat.waitingpay_num = total\n stat.save(update_fields=[\"waitingpay_num\"])", "def __init__(self, number):\n self.number = number\n self.available = True", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "async def release(ctx, pkmn_id: int):\n res = database.release_from_party(ctx.message.author, pkmn_id)\n if res:\n tier = _get_tier(int(pkmn_id))\n money = int(_get_money_earned(tier)/3)\n message = \"Oak: Don't worry I'll take care of {} ;) \\nHere's ₱{}, buy yourself something nice.\"\\\n .format(database.get_pokemon_name(pkmn_id), money)\n database.add_pokedollars(ctx.message.author, money)\n else:\n message = \"**Oak**: Make sure you actually have that pokemon or if your party is not full ya scrub.\"\n await ctx.send(message)" ]
[ "0.62152404", "0.59016234", "0.5845852", "0.5821504", "0.5800439", "0.56484985", "0.5638114", "0.56370085", "0.5564734", "0.55455524", "0.5474723", "0.5458936", "0.5431911", "0.5408704", "0.5308613", "0.530613", "0.5302213", "0.52963436", "0.528371", "0.52709514", "0.5269176", "0.5222528", "0.5193967", "0.51853716", "0.51842207", "0.51832426", "0.51742417", "0.5170966", "0.51681507", "0.5155691" ]
0.6200835
1
verifies if number is in service
def in_service(self, number): nat_number = phonenumber_as_e164(number) nat_number = self._parse_number_to_bw_format(str(nat_number), 'US') retval = False try: self.account_client.get_phone_number(nat_number) retval = True except BandwidthAccountAPIException as e: logging.info("Phone number query: {}, caused error: {}". format(number, e)) pass return retval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_service_name_correct(self, service):\r\n return service in self.services", "def check_for_int(check):", "def check(self, number):\n return number in self.numbers", "def test_hugepage_service_state(Service):\n\n service = Service('disable-transparent-hugepages')\n\n assert service.is_enabled\n assert service.is_running", "def is_number_correct(total):\n if int(total) < 0:\n return None\n return True", "def check(self, number: int) -> bool:\n return (number in self.numbers_set)", "def check(self, number: int) -> bool:\n return number in self.nums", "def _interesting_service(self, service: UpnpService) -> bool:\n service_type = service.service_type\n for service_types in self._SERVICE_TYPES.values():\n if service_type in service_types:\n return True\n\n return False", "def check_number(self, number):\n return (not self.whitelist_numbers or\n number in self._number_whitelist)", "def __checkProduct(self, prd, num):\n if prd not in vmdata.prdStore or not isinstance(num, int) or num < 1:\n return False \n return True", "def test_if_it_includes_a_number_if_the_number_is(self):\n self.assertNotIn(16, prime_numbers(16))", "def is_key(number):\n res = False\n if is_integer(number):\n if int(number) > 0:\n res = True\n return res", "def is_armstrong_number(number: int) -> bool:\n return get_armstrong_value(number) == number", "def test01getNumber(self):\n self.assertEqual( calc.getNumber(), 1234 )", "def is_service_installed(klass, service):\n return True", "def check_ser_presence(service, user_pass_list):\r\n\r\n for pass_info in user_pass_list:\r\n if pass_info[1] == service:\r\n return False\r\n\r\n return True", "def is_number(self) -> bool:\n return False", "def is_not_used(code):\n return 0 <= code <= 999", "def is_in_service(self) -> bool:\n return self._enabled", "def _get_service_version(service):\n\n return int(service.split(':')[4])", "def __checkCoin(self, coin, num):\n if coin not in vmdata.coinStore or not isinstance(num, int) or num < 1:\n return False\n return True", "def healthy_service(self):\n return not self.service_currently_down and not self.service_recently_down", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def service_by_number(self,servicenumber):\n\t\tif servicenumber in self.service_and_number:\n\t\t\treturn self.services[self.service_and_number[servicenumber]]\n\t\treturn None", "def check_type(number):\r\n if number.find(\"i\") != -1:\r\n return 1\r\n return 0", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def instNoValid(self, iNo):\n if drawer.isInt(iNo) and iNo in self._instrNumbers: return 1\n else: return 0", "def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)", "def whitelist_numbers(self):\n return getattr(self, '_do_whitelist_numbers', False)", "def is_number(G):\n return True" ]
[ "0.59083414", "0.5883824", "0.5763368", "0.5710403", "0.5586025", "0.5581765", "0.5541488", "0.5524599", "0.55024534", "0.5413349", "0.53525877", "0.53385204", "0.5334024", "0.5252063", "0.52470577", "0.52399766", "0.52303386", "0.5215163", "0.52144957", "0.5174327", "0.5161034", "0.5150379", "0.51384264", "0.51282936", "0.5116418", "0.5115396", "0.5106401", "0.5099268", "0.5092782", "0.50914633" ]
0.66074604
0
fetches media file that was part of an MMS. returns out filename or None if unable to
def get_media(self, url, out_filename=None, raw_data=False): if not raw_data: if not out_filename: out_filename = os.path.join(settings.BW_MMS_DIRECTORY, url.split('/')[-1]) if not os.path.isdir(os.path.dirname(out_filename)): raise ValueError('Invalid output directory: {} - ' 'unable to download MMS'. format(os.path.dirname(out_filename))) if os.path.isfile(out_filename): logging.info('filename {}, already exists - will be ' 'overwritten.....'.format(out_filename)) try: resp = requests.get(url, auth=(self.token, self.secret)) except requests.exceptions.RequestException as e: logging.info('Error while fetching media: {}'.format(e)) return if resp.status_code == requests.codes.ok: try: if raw_data: return resp.content else: with open(out_filename, 'wb') as fd: fd.write(resp.content) return out_filename except Exception as e: logging.info('Error: {} while writing file: {}'. format(e, out_filename)) return logging.info('Invalid URI or an error occured, response: {}, ' 'response content: {}'.format(resp.status_code, resp.text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_media_filename(media_url):\n return media_url.split(\"/\")[-1]", "def _get_file_path(self, url):\n try:\n row = ET.fromstring(self._session.get(url, headers={\"Access-Token\":self._token}).text)[1][2][1]\n data = [row[1].text, row[1].text, row[2].text]\n if \" - S\" in data[0]:\n data[0] = data[0][0:data[1].rfind(\" - S\")]\n elif \" (\" in data[0]:\n data[0] = data[0][0:data[1].rfind(\" (\")]\n return data\n except Exception as e:\n exception_type = type(e).__name__\n print(\"Unable to get media name.\")\n print(exception_type)\n print(e)\n return None", "def file_path(self, request, response=None, info=None):\n url = request.url\n media_guid = hashlib.sha1(to_bytes(url)).hexdigest()\n media_ext = os.path.splitext(url)[1]\n if not media_ext.isalnum():\n media_ext = os.path.splitext(urlparse(url).path)[1]\n return \"full/%s%s\" % (media_guid, media_ext)", "def fetch_media(fname):\n uuid = request.args.get(\"id\")\n crop = request.args.get(\"crop\")\n # show_thumb for cropped image only\n show_thumb = request.args.get(\"full\", \"0\") == \"0\"\n fullname, mimetype = media.get_fullname(uuid)\n try:\n if crop:\n # crop dimensions are diescribed as % of width and height\n image = media.get_cropped_image(fullname, crop, show_thumb)\n logger.debug(\"-> bp.scene.routes.fetch_media cropped png\")\n # Create a png image in memery and display it\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n return Response(buffer.getvalue(), mimetype=\"image/png\")\n else:\n logger.debug(\"-> bp.scene.routes.fetch_media full\")\n return send_file(fullname, mimetype=mimetype)\n except FileNotFoundError:\n # Show default image\n ret = send_file(os.path.join(\"static\", \"image/noone.jpg\"), mimetype=mimetype)\n logger.debug(f\"-> bp.scene.routes.fetch_media none\")\n return ret", "def prepare_media(self, object):\n if object.media is not None:\n #return object.media.media_file.name\n return '/api/v1/media/{0}/'.format(object.media.id)\n else:\n return ''", "def get_media_path(self, filename):\n return join(settings.CMS_PAGE_MEDIA_PATH, \"%d\" % self.id, filename)", "async def get_file(self, link, name, md5, session):\n if os.path.exists(name) or md5 in opts.archived_md5:\n self.count += 1\n return\n\n async with session.get(link) as media:\n # Open file initially with .part suffix\n with open(f\"{name}.part\", \"wb\") as f:\n while True:\n chunk = await media.content.read(1024)\n if not chunk:\n break\n f.write(chunk)\n\n # Remove .part suffix once complete\n # After this point file won't get removed if script gets interrupted\n os.rename(f\"{name}.part\", name)\n\n if opts.archive:\n log_hash(md5)\n self.count += 1\n msg(f\"{self.fetch_progress()} {self.board}/{self.dir}/{name}\")", "def _getURL(self, params):\n qs = Media.objects.filter(pk=params['id'], deleted=False)\n if not qs.exists():\n raise Http404\n response_data = list(qs.values(*MEDIA_PROPERTIES))\n # Use 24-hour URLS\n _presign(24*3600, response_data)\n\n element = params['element']\n if element == 'auto':\n if qs[0].meta.dtype == 'video':\n element = 'streaming'\n elif qs[0].meta.dtype == 'image':\n element = 'image'\n elif qs[0].meta.dtype == 'multi':\n return None\n if element == 'audio':\n return response_data[0].get('media_files',{}).get('audio',[])[0]['path']\n elif element == 'thumbnail':\n search_in = response_data[0].get('media_files',{}).get('thumbnail',[])\n elif element == 'thumbnail_gif':\n search_in = response_data[0].get('media_files',{}).get('thumbnail_gif',[])\n elif element == 'image':\n search_in = 
response_data[0].get('media_files',{}).get('image',[])\n elif element == 'streaming':\n search_in = response_data[0].get('media_files',{}).get('streaming',[])\n elif element == 'archival':\n search_in = response_data[0].get('media_files',{}).get('archival',[])\n elif element == 'attachment':\n search_in = response_data[0].get('media_files',{}).get('attachment',[])\n\n if not search_in:\n return None\n quality = params['quality']\n max_delta = sys.maxsize\n quality_idx = 0\n for idx, info in enumerate(search_in):\n delta = abs(quality-info['resolution'][0])\n if delta < max_delta:\n quality_idx = idx\n max_delta = delta\n return search_in[quality_idx]['path']", "async def get_local_media(\n self, request: SynapseRequest, media_id: str, name: Optional[str]\n ) -> None:\n media_info = await self.store.get_local_media(media_id)\n if not media_info or media_info[\"quarantined_by\"]:\n respond_404(request)\n return\n\n self.mark_recently_accessed(None, media_id)\n\n media_type = media_info[\"media_type\"]\n if not media_type:\n media_type = \"application/octet-stream\"\n media_length = media_info[\"media_length\"]\n upload_name = name if name else media_info[\"upload_name\"]\n url_cache = media_info[\"url_cache\"]\n\n file_info = FileInfo(None, media_id, url_cache=bool(url_cache))\n\n responder = await self.media_storage.fetch_media(file_info)\n await respond_with_responder(\n request, responder, media_type, media_length, upload_name\n )", "def media(filename):\n media_path = flask.current_app.instance_path + '/media'\n return flask.send_from_directory(media_path, filename)", "def media_path(self):\n return self._path", "def get_object(self):\n try:\n obj = WorkoutFile.objects.get(\n Q(workout=self.kwargs['workout_id']) & Q(file=f'workouts/{self.kwargs[\"workout_id\"]}/{self.kwargs[\"filename\"]}')\n )\n except:\n raise Http404(\"Media does not exist\")\n\n self.check_object_permissions(self.request, obj)\n\n return obj", "def get_filename(target_dir, filename_prefix):\n # This whole function is not the nicest thing, but isolating it makes\n # things clearer. 
A good refactoring would be to get the info from the\n # video_url or the current output, to avoid the iteration from the\n # current dir.\n filenames = os.listdir(target_dir)\n for name in filenames: # Find the filename of the downloaded video\n if name.startswith(filename_prefix):\n (basename, ext) = os.path.splitext(name)\n return basename\n return None", "def retrieveURL(mw, url):\n req = urllib2.Request(url, None, {'User-Agent': 'Mozilla/5.0 (compatible; Anki)'})\n resp = urllib2.urlopen(req)\n # ct = resp.info().getheader(\"content-type\")\n filecontents = resp.read()\n # strip off any query string\n url = re.sub(r\"\\?.*?$\", \"\", url)\n path = unicode(urllib2.unquote(url.encode(\"utf8\")), \"utf8\")\n fname = os.path.basename(path)\n if not fname:\n fname = checksum(filecontents)\n return mw.col.media.writeData(unicode(fname), filecontents)", "def _get_current_media(self):\n key = int(self.status.content_id.split(\"/\")[-1])\n media_item = self.pms.fetchItem(key).reload()\n media_idx = self.status.media_custom_data.get(\"mediaIndex\", 0)\n part_idx = self.status.media_custom_data.get(\"partIndex\", 0)\n media = media_item.media[media_idx]\n part = media.parts[part_idx]\n\n return media_item, media, part", "def get_single_media(media_id):\n return query_single(media_id, Media, media_schema)", "def get_file(self, file_id):\n LOG.debug(\"Getting a file from mattermost\")\n url = '%s/api/v4/files/%s' % (self.server_url, file_id)\n LOG.debug(\"Sending: %s\", url)\n response = self._request(self._session.get, url)\n\n if response.status_code != 200:\n raise RuntimeError(\"Server unhappy. (%s)\", response)\n\n return response.content", "def download_filename_full(self, doc):\n # todo modify\n authors = \",\".join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mp3_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = os.path.join(author, \"%s - %s.mp4\" % (author, mp3_name))\n return name", "def media(self, path):\n path = \"/media/%s%s\" % (self.session.root, format_path(path))\n\n url, params, headers = self.request(path, method='GET')\n\n return self.rest_client.GET(url, headers)", "def get_demo_file(fname):\n\n d = download_demo_files()\n if fname in d:\n return d[fname]\n else:\n return None", "def get_filename( self, default=None, decode=None ):\n return self.get_param( 'filename', None, 'content-disposition', decode=decode ) \\\n or self.get_param( 'name', default, 'content-type', decode=decode )", "def _get_file_helper(self):\n page = self.course.moodle.fetch(\n self._download_url % self.id,\n None\n )\n # The resource URL should magically 303 across to the actual file\n if page.history and page.history[0].status_code == 303:\n return page, page.content\n\n # If it doesn't 303 to the actual file then there might be a download\n # link to try\n bs = bs4.BeautifulSoup(page.text, 'lxml')\n\n div = bs.find('div', class_='resourceworkaround')\n\n if div: # it's a link to the resource\n link = div.find('a').href\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n # Perhaps it's an embedded object\n obj = bs.find('object', id='resourceobject')\n if obj:\n link = obj['data']\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n raise ValueError(\"No idea how to get that resource\")", "def get_file_name_from_resposne(r):\n if not r: \n return None\n return get_file_name_from_cd(r.headers.get())", "def download_movie(self, filmid):\n 
self.logger.debug('download_movie')\n if not self._test_download_path(self.settings.getDownloadPathMovie()):\n return\n film = self.database.retrieve_film_info(filmid)\n if film is None:\n return\n (filmurl, extension,) = self._get_film_url_and_extension(film)\n # try to create a good name for the downloaded file\n namestem = mvutils.cleanup_filename(film.title)[:80]\n if not namestem:\n # try to take the show name instead...\n namestem = mvutils.cleanup_filename(film.show)[:64]\n if not namestem:\n namestem = u'Film'\n namestem = namestem + '-{}'.format(film.filmid)\n elif self.settings.getMovieNameWithShow():\n showname = mvutils.cleanup_filename(film.show)[:64]\n if showname:\n namestem = showname + ' - ' + namestem\n # review name\n if self.settings.getReviewName():\n (namestem, confirmed) = self.notifier.get_entered_text(namestem, 30986)\n namestem = mvutils.cleanup_filename(namestem)\n if len(namestem) < 1 or confirmed is False:\n return\n # determine destination path and film filename\n if self.settings.getUseMovieFolder():\n pathname = self.settings.getDownloadPathMovie() + namestem + '/'\n filename = namestem\n else:\n pathname = self.settings.getDownloadPathMovie()\n filename = namestem\n # check for duplicate\n # keep\n if self.settings.getFileExistsAction() == 1 and xbmcvfs.exists(pathname + filename + extension):\n return\n # prompt\n if self.settings.getFileExistsAction() == 0:\n while xbmcvfs.exists(pathname + filename + extension):\n (filename, confirmed) = self.notifier.get_entered_text(filename, 30987)\n filename = mvutils.cleanup_filename(filename)\n if len(filename) < 1 or confirmed is False:\n return\n\n # download the stuff\n if self._download_files(film, filmurl, pathname, filename, extension):\n self._make_movie_nfo_file(film, filmurl, pathname, filename)\n else:\n self.logger.debug('download_movie ERROR')", "def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]", "def get_media_id(media_url):\n split_url = media_url.split(\"/\")\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png\n if split_url[-2] == \"media\":\n return split_url[-1]\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png\n else:\n #This is required for now due to the SDK parsing out the `/`s\n return \"%2F\".join(split_url[-3:])", "def get_file():\n fname = get_var(request, \"fname\")\n return open(fname).read()", "def get_mediafile_blob_data(self, old):\n if old[\"is_directory\"]:\n return None\n\n try:\n db_mediafile = Mediafile.objects.get(pk=old[\"id\"])\n except Mediafile.DoesNotExist:\n return None\n filename = db_mediafile.original_filename\n\n if use_mediafile_database:\n with connections[\"mediafiles\"].cursor() as cursor:\n cursor.execute(\n f\"SELECT data FROM {mediafile_database_tablename} WHERE id = %s\",\n [old[\"id\"]],\n )\n row = cursor.fetchone()\n if row is None:\n return None\n data = row[0]\n else:\n data = db_mediafile.mediafile.open().read()\n\n blob = base64.b64encode(data).decode(\"utf-8\")\n return filename, len(data), blob", "def media_content_id(self) -> str | None:\n if self._device.movie.handle:\n return self._device.movie.handle\n return None", "async def _get_remote_media_impl(\n self, server_name: str, media_id: str\n ) -> Tuple[Optional[Responder], dict]:\n media_info = await self.store.get_cached_remote_media(server_name, media_id)\n\n # file_id is the ID we use to track the file locally. 
If we've already\n # seen the file then reuse the existing ID, otherwise generate a new\n # one.\n\n # If we have an entry in the DB, try and look for it\n if media_info:\n file_id = media_info[\"filesystem_id\"]\n file_info = FileInfo(server_name, file_id)\n\n if media_info[\"quarantined_by\"]:\n logger.info(\"Media is quarantined\")\n raise NotFoundError()\n\n if not media_info[\"media_type\"]:\n media_info[\"media_type\"] = \"application/octet-stream\"\n\n responder = await self.media_storage.fetch_media(file_info)\n if responder:\n return responder, media_info\n\n # Failed to find the file anywhere, lets download it.\n\n try:\n media_info = await self._download_remote_file(\n server_name,\n media_id,\n )\n except SynapseError:\n raise\n except Exception as e:\n # An exception may be because we downloaded media in another\n # process, so let's check if we magically have the media.\n media_info = await self.store.get_cached_remote_media(server_name, media_id)\n if not media_info:\n raise e\n\n file_id = media_info[\"filesystem_id\"]\n if not media_info[\"media_type\"]:\n media_info[\"media_type\"] = \"application/octet-stream\"\n file_info = FileInfo(server_name, file_id)\n\n # We generate thumbnails even if another process downloaded the media\n # as a) it's conceivable that the other download request dies before it\n # generates thumbnails, but mainly b) we want to be sure the thumbnails\n # have finished being generated before responding to the client,\n # otherwise they'll request thumbnails and get a 404 if they're not\n # ready yet.\n await self._generate_thumbnails(\n server_name, media_id, file_id, media_info[\"media_type\"]\n )\n\n responder = await self.media_storage.fetch_media(file_info)\n return responder, media_info" ]
[ "0.6806104", "0.66842276", "0.63569075", "0.63093585", "0.6236515", "0.6230892", "0.6010185", "0.5981271", "0.5938037", "0.591105", "0.59071726", "0.5906296", "0.5849572", "0.5843085", "0.58247674", "0.58222777", "0.57600147", "0.57513124", "0.5730131", "0.571751", "0.57169527", "0.56973904", "0.56919014", "0.56864756", "0.56812567", "0.5667435", "0.5660215", "0.5643066", "0.5631143", "0.55902416" ]
0.7086234
0
function to return keys from my_dict for any value; return empty list if no value
def get_keys(my_dict, val): keys=[] for key, value in my_dict.items(): if val == value: keys.append(key) return keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_keys(d):\n return list(d.keys())", "def keys(self):\n return [entry.key for entry in self.table if entry.value is not None]", "def select_keys(my_dict: Dict, keys: Sequence) -> Dict:\n keyset = set(keys)\n return {k: v for k, v in my_dict.items() if k in keyset}", "def getall(self, key):\n return self.values.get(key, [])", "def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list", "def get_keys(in_data: Any) -> List:\n if np.isscalar(in_data) or in_data is None:\n return []\n try:\n return list(in_data.keys()) + flatten([get_keys(v) for v in in_data.values()])\n except AttributeError:\n # some sort of list like iterable\n return flatten([get_keys(x) for x in in_data])", "def _get_datatypes(input_dict):\n return set(filter(None.__ne__, set(input_dict.keys())))", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def remove_empty_list(dictionary):\n\n return {k: v for k, v in dictionary.items() if v != []}", "def default_dict_keys():\n return defaults_dict.keys()", "def filter_valid_values(dictionary):\n return ((key, value)\n for key, value in six.iteritems(dictionary)\n if value is not None)", "def return_keys(tsd):\n return list(tsd.keys())", "def func4(key):\n return key in list(my_test_dict.keys())", "def _get_keys(dict_, t):\n return {k for (k, v) in dict_.items() if v >= t}", "def remove_empty_values(_dict):\n return {k: v for k, v in list(_dict.items()) if v is not None}", "def getKeysByValue(dictOfElements, valueToFind):\n listOfKeys = list()\n listOfItems = dictOfElements.items()\n for item in listOfItems:\n if valueToFind in item[1]:\n listOfKeys.append(item[0])\n return listOfKeys", "def items(value):\n if value is None:\n # `{% for k, v in value.items %}` doesn't raise when value is None or\n # not in the context, so neither should `{% for k, v in value|items %}`\n return []\n return value.items()", "def get_dictionary_keys(self,dictionary):\r\n return sorted(dictionary)", "def GetDictionaryKeys(value, keys):\n return {key: value[key] for key in keys if key in value}", "async def get_keys(self):\n return self.dict.keys()", "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "def get_key_by_value(needvalue, mydict):\n return [key for key, value in mydict.iteritems() if value == needvalue][0]", "def _GetKeys(data):\n if hasattr(data, 'keys'):\n return set(data.keys())\n elif hasattr(data, 'dtype') and data.dtype.names:\n return set(data.dtype.names)\n else:\n return None", "def _key_vals(dict_):\n return [(key, val) for key, val in dict_.iteritems()]", "def _keys(obj):\n \n k = []\n \n if len(obj) > 0:\n # There is at least one thing\n for x in obj:\n # Make sure keys() is defined\n if hasattr(x, 'keys'):\n \n k.extend(x.keys())\n \n k = list(set(k))\n k.sort()\n \n return k", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def quantize_key_values(key):\n if isinstance(key, dict):\n return key.keys()\n\n return key", "def keys(self) -> List:\n pass", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def get_all_keys(\n cls, hierarchical_dict: dict, include_values: bool = False\n ) -> Union[List[str], dict]:\n all_keys = {}\n for key in hierarchical_dict:\n if isinstance(hierarchical_dict[key], dict):\n all_sub_keys = FuseUtilsHierarchicalDict.get_all_keys(\n 
hierarchical_dict[key], include_values=True\n )\n keys_to_add = {\n f\"{key}.{sub_key}\": all_sub_keys[sub_key]\n for sub_key in all_sub_keys\n }\n all_keys.update(keys_to_add)\n else:\n all_keys[key] = hierarchical_dict[key]\n if include_values:\n return all_keys\n else:\n return list(all_keys.keys())" ]
[ "0.66081107", "0.6273039", "0.62509716", "0.6220051", "0.62197506", "0.61547536", "0.61087847", "0.6104465", "0.60851574", "0.6081447", "0.60670567", "0.60422623", "0.5984794", "0.5969637", "0.59643", "0.5940668", "0.59390646", "0.5931389", "0.59045744", "0.58952636", "0.58745086", "0.58451664", "0.5822645", "0.58199376", "0.5813466", "0.5803009", "0.5800547", "0.57994837", "0.57912606", "0.5764538" ]
0.7193149
0
from a list of tuples, return one np.array whose sum of items is the minimum
def get_minimum_tuple(liste_of_tuple): sum_of_tuple=0 given_array=0 for tup in liste_of_tuple: arr = np.array(tup) if (sum_of_tuple == 0): sum_of_tuple = np.sum(arr) if (np.sum(arr))<=sum_of_tuple: given_array = arr return given_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def argmin(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmin\")\n return k, cast(pdarray, v)", "def pmin(\n *x: NumericType,\n na_rm: bool = False\n) -> Iterable[float]:\n maxlen = max(map(length_of, x))\n x = (recycle_value(elem, maxlen) for elem in x)\n return Array([min(elem, na_rm=na_rm) for elem in zip(*x)])", "def compare_min(values, weights):\n return np.min(values.numpy())", "def argmin(self, values):\n return self.aggregate(values, \"argmin\")", "def calc_min(data: list) -> float:\n acc = data[0]\n for n in data:\n if n < acc:\n acc = n\n return float(acc)", "def expanding_min_1d_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n minv = a[0]\n cnt = 0\n for i in range(a.shape[0]):\n if np.isnan(minv) or a[i] < minv:\n minv = a[i]\n if ~np.isnan(a[i]):\n cnt += 1\n if cnt < minp:\n out[i] = np.nan\n else:\n out[i] = minv\n return out", "def min(*x, na_rm: bool = False) -> Any:\n fun = numpy.nanmin if na_rm else numpy.min\n x = Collection(*x) # flatten\n return fun(x)", "def arrmin(a):\n # could set arrmin = amin in scipy if scipy is installed\n try:\n return min(a.flat)\n except AttributeError:\n # not a NumPy array\n if isinstance(a, collections.Sequence):\n return min(a)\n elif isinstance(a, numbers.Number):\n return a\n else:\n raise TypeError('arrmin of %s not supported' % type(a))", "def min(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n if values.dtype == bool:\n raise TypeError(\"min is only supported for pdarrays of dtype float64, uint64, and int64\")\n k, v = self.aggregate(values, \"min\", skipna)\n return k, cast(pdarray, v)", "def smallestValue(self, nd1, nd2):\r\n minnd1 = min(nd1.values())\r\n minnd2 = min(nd2.values())\r\n totalmin = min(minnd1,minnd2)\r\n return totalmin", "def get_minimum_value_from_list(self, list_):\r\n return min(list_)", "def min(self, values):\n return self.aggregate(values, \"min\")", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def _argmin(a, positions, shape, dtype):\n\n result = numpy.empty((1,), dtype=dtype)\n\n pos_nd = numpy.unravel_index(positions[numpy.argmin(a)], shape)\n for i, pos_nd_i in enumerate(pos_nd):\n result[\"pos\"][0, i] = pos_nd_i\n\n return result[0]", "def fmin(items):\n if len(items) == 0:\n return 0.\n\n return min(items)", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def find_minima_via_projections(line, arr, weight):\n top_pt = weight*line\n low_pt = -weight*line\n x_line = top_pt - low_pt\n\n projs = np.zeros((arr.shape[0],), dtype=float)\n for i, pt in enumerate(arr):\n vec = pt - low_pt\n projs[i] = project_vectors_ab(vec, x_line)\n\n return np.argmin(projs)", "def nanmin_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.nanmin(a[:, col])\n return out", "def min():\n\n # check if collection passed to process() so far is empty\n assert len(inlist) > 0, \"process() has empty collection\"\n\n # assign tmp the first val inside collection \n tmp = inlist[0]\n # for loop to iterate through collection to find minimum \n for item in inlist:\n if item < tmp:\n tmp = item \n return tmp # return the minimum of all int", "def expanding_min_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = expanding_min_1d_nb(a[:, col], minp=minp)\n return out", "def min(x, 
reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)", "def MIN(*args):\n return _group_function(min, *args)", "def get_minimum():\n return [\n convert_variables([0.78547, 0.78547, 0.78547]),\n ]", "def smallest_impossible_sum(arr:list):\n smallest_sum = 1\n for i in arr:\n if i > smallest_sum:\n return smallest_sum\n else:\n smallest_sum += i\n\n return smallest_sum", "def arrmin(a):\n try:\n return a.min()\n except AttributeError:\n # not a NumPy array\n if isinstance(a, collections.Sequence):\n return min(a) # does not work for nested sequences\n elif isinstance(a, numbers.Number):\n return a\n else:\n raise TypeError('arrmin of %s not supported' % type(a))", "def arrmin(a):\n try:\n return a.min()\n except AttributeError:\n # not a NumPy array\n if isinstance(a, collections.Sequence):\n return min(a) # does not work for nested sequences\n elif isinstance(a, numbers.Number):\n return a\n else:\n raise TypeError('arrmin of %s not supported' % type(a))", "def matrix_min(data):\n if is_SparseDataFrame(data):\n data = [np.min(data[col]) for col in data.columns]\n elif is_sparse_dataframe(data):\n data = [sparse_series_min(data[col]) for col in data.columns]\n elif isinstance(data, pd.DataFrame):\n data = np.min(data)\n elif isinstance(data, sparse.lil_matrix):\n data = [np.min(d) for d in data.data] + [0]\n elif isinstance(data, sparse.dok_matrix):\n data = list(data.values()) + [0]\n elif isinstance(data, sparse.dia_matrix):\n data = [np.min(data.data), 0]\n return np.min(data)", "def getMin(listOfVCFs, compareFun, numMins = 1):\n # \"\"\"Returns the numMin keys with smallest values in the list\"\"\"\n return min(listOfVCFs, key = compareFun)", "def sim_min(sim_mats):\n return np.array(sim_mats).min(axis=0)", "def minimum_value(drawbles):\n # Loop over histograms\n result = 10**20 \n for drawable in drawbles:\n # Unpack things if there is an error band\n if isinstance(drawable, tuple):\n drawable, error_band = drawable\n else:\n error_band = None\n\n if is_histo(drawable):\n MIN_VAL = 0.0 # found minimum must be larger than 0\n minimum = drawable.GetMinimum(MIN_VAL) \n elif is_stack(drawable):\n minimum = drawable.GetMinimum(\"nostack\")\n elif is_graph(drawable):\n minimum = TMath.MinElement(drawable.GetN(), drawable.GetY())\n elif is_line(drawable):\n minimum = min(drawable.GetY1(), drawable.GetY2()) # not tested\n else:\n print type(drawable)\n raise ValueError('unsupported drawable type')\n\n if minimum < 0.0: continue\n\n # Update the result\n result = min(result, minimum)\n\n return result" ]
[ "0.6525309", "0.6434365", "0.61789453", "0.5923437", "0.5893343", "0.5817158", "0.5815836", "0.58061725", "0.57888776", "0.57280415", "0.569099", "0.5679934", "0.5679168", "0.5647516", "0.563053", "0.55849946", "0.5577152", "0.55327713", "0.55204874", "0.55086505", "0.5495106", "0.54770166", "0.54592025", "0.5452118", "0.5426918", "0.5426918", "0.5420645", "0.541343", "0.5413211", "0.54115194" ]
0.7566244
0
Updates the current configuration with a new object.
def update(self, obj): self.cfg.update(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)", "def update(self):\n self.save_config_file()", "def configure(self, config: dict):\n self.config.update(config)", "def configure(new_config: Mapping):\n config.update(new_config)", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def update(self, enabled=None, cnames=None, comment=None):\r\n new_config = DistributionConfig(self.connection, self.config.origin,\r\n self.config.enabled, self.config.caller_reference,\r\n self.config.cnames, self.config.comment,\r\n self.config.trusted_signers,\r\n self.config.default_root_object)\r\n if enabled != None:\r\n new_config.enabled = enabled\r\n if cnames != None:\r\n new_config.cnames = cnames\r\n if comment != None:\r\n new_config.comment = comment\r\n self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)\r\n self.config = new_config\r\n self._object_class = Object", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)", "def update(self, config):\n if not isinstance(config, dict):\n raise ValueError(\"Argument `config` should be dictionary\")\n self.__data.update(config)", "def _update(self, config_dict, allow_new_keys=True):\r\n if not config_dict:\r\n return\r\n\r\n for k, v in six.iteritems(config_dict):\r\n if k not in self.__dict__:\r\n if allow_new_keys:\r\n self.__setattr__(k, v)\r\n else:\r\n raise KeyError('Key `{}` does not exist for overriding. 
'.format(k))\r\n else:\r\n if isinstance(self.__dict__[k], Config) and isinstance(v, dict):\r\n self.__dict__[k]._update(v, allow_new_keys)\r\n elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):\r\n self.__dict__[k]._update(v.as_dict(), allow_new_keys)\r\n else:\r\n self.__setattr__(k, v)", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def refresh_configuration(self):\n pass", "def reload(self):\n self.load_config()\n # Seems we need to explicitly refresh this\n if self.main_instance:\n self.main_instance.config = self.config", "def update_from_obj(self, obj, copy=False):\n obj.clean()\n obj_config = obj._config\n all_props = self.__class__.CONFIG_PROPERTIES\n if copy:\n for key, value in six.iteritems(obj_config):\n attr_config = all_props.get(key)\n if attr_config:\n attr_type = attr_config.attr_type\n if attr_type:\n if issubclass(attr_type, list):\n self._config[key] = value[:]\n elif attr_type is dict:\n self._config[key] = value.copy()\n else:\n self._config[key] = value\n self._modified.discard(key)\n else:\n filtered_dict = {key: value\n for key, value in six.iteritems(obj_config)\n if key in all_props}\n self._config.update(filtered_dict)\n self._modified.difference_update(filtered_dict.keys())", "def _update(self, config_dict, allow_new_keys=True):\n if not config_dict:\n return\n\n for k, v in six.iteritems(config_dict):\n if k not in self.__dict__.keys():\n if allow_new_keys:\n self.__setattr__(k, v)\n else:\n raise KeyError('Key `{}` does not exist for overriding. '.format(k))\n else:\n if isinstance(v, dict):\n self.__dict__[k]._update(v, allow_new_keys)\n else:\n self.__dict__[k] = copy.deepcopy(v)", "def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)", "def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' 
% (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)", "def set_config(self, aConfig):\n \n # we update the dict of the existing config with the passed\n # parameter. This means that the new config is merged with\n # the old, but all new members overwrite old one. This is\n # more robust.\n self._config.__dict__.update(aConfig.__dict__)\n # apply the config to the underlying logic\n self.config_to_logic()\n # bring it back all the way up to the view\n self.logic_to_config()\n\n # but only if we are in view mode\n if self.view_initialised:\n self.config_to_view()\n\n # the config has been set, so we assumem that the module has\n # now been modified. \n self._module_manager.modify_module(self)", "def update_config(self, config):\n self.config = {\n \"key\": \"\",\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['github_api_key']", "def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def config(self, cfg):\n self._config.update(cfg)\n return self", "def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)" ]
[ "0.7416382", "0.7137878", "0.6981426", "0.697743", "0.69123477", "0.68403643", "0.68191355", "0.6810213", "0.6806464", "0.67775714", "0.67775714", "0.674697", "0.67232466", "0.6714576", "0.6706718", "0.67066795", "0.6673145", "0.6670666", "0.66530377", "0.6601052", "0.65788263", "0.65246797", "0.64543474", "0.6452609", "0.64473975", "0.6422319", "0.6407904", "0.6407567", "0.6383086", "0.6360531" ]
0.8169484
0
Get a resource from property Resources of config.
def resource(self, n): cfg = self.read() for res in cfg.get('Resources', []): res_name = res.get('Resource') if res_name == n: return ConfigResource(res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetResource(self, name):\r\n matches = [x for x in self.resources if x.name == name]\r\n if len(matches) == 1:\r\n return matches[0]\r\n elif len(matches) > 1:\r\n raise errors.ConfigError(\"Multiple resources with the name [%s]\" % name)\r\n else:\r\n return None", "def get_resource_config(target=False, force=None):\n return get_stored_property(ctx, 'resource_config', target, force)", "def resource(self):\n return self.properties.get('resource',\n Entity(self.context, ResourcePath(\"resource\", self.resource_path)))", "def __getitem__(self, name):\n return self.__resources[name]", "def load_resource_config(config_path):\n return load_json_file(config_path)", "def getResource(self):\n return self.__resource;", "def getResource(self):\n\n return self.__resource;", "def getResource(self, resourceName, default=None, useCache=True, **kwargs):\n logger.debug(\"Requesting resource %r\", resourceName)\n if resourceName in self.__resourcesD:\n return self.__resourcesD[resourceName](self.__cfgOb, self.__configName, self.__cachePath, useCache=useCache, **kwargs)\n else:\n logger.error(\"Request for unsupported resource %r returning %r\", resourceName, default)\n #\n return default", "def _get_resource_property(self, resource_name, property_name, default_value=None):\n if resource_name == \"ExperimentDb\":\n return self._get_experiment_db_property(property_name, default_value)\n elif resource_name == \"ModelDb\":\n return self._get_model_db_property(property_name, default_value)\n elif resource_name == \"JoinDb\":\n return self._get_join_db_property(property_name, default_value)\n elif resource_name == \"IAMRole\":\n return self._get_iam_role_property(property_name, default_value)\n else:\n return None", "def get(self, resource, default=0):\n return getattr(self._resources, resource, default)", "def get_resource_from_name(name):\n return _name_to_resources.get(name, None)", "def _get_resource(self, label: str, source: dict, resource_type: str):\r\n try:\r\n return source[label]\r\n except KeyError:\r\n raise ValueError(\"Cannot find {0} with label '{1}'.\\nExisting {0} labels: {2}\".format(\r\n resource_type, label, list(source.keys())))", "def get_resource_url(self, resource_name):\r\n return self.__resource_meta.get(resource_name,{}).get(\"resource\", None)", "def lookup(self, name):\n for config in self._config:\n if name in config:\n return config[name]\n raise AttributeError(\"%s not found\" % name)", "def getResource(self):\n return self.serviceClass.app.resource()", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def getResource(self):\n pass;", "def resources(self):\n return self.__resources", "def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None", "def resources(self) -> Sequence['outputs.GetResourcesResourceResult']:\n return pulumi.get(self, \"resources\")", "async def get_resource(self, resource_id: int) -> dict:\n resource = await self.request.get(\n 
join_path(self._base_path, str(resource_id))\n )\n self._sanitize_resource(self._get_to_actual_data(resource))\n return resource", "def get_resource_from_class(klass):\n return _class_to_resources.get(klass, None)", "def properties(self):\r\n return resources.Properties(self)", "def get(owner_name, resource_name):\n resource = get_node(owner_name, resource_name)\n return resource if isinstance(resource, Resource) else None", "def resource_reference(self):\n return self.properties.get(\"resourceReference\", ResourceReference())", "def get_resource(res_name, res_type=\"icons\"):\n own_path = os.path.dirname(__file__)\n resource_path = os.path.abspath(os.path.join(own_path, os.pardir, \"resources\", res_type))\n return os.path.join(resource_path, res_name)", "def get_resource(self, name: str) -> ResourceBase:\n resource = self.get_payload(name)\n if not isinstance(resource, ResourceBase):\n raise TypeError(\"Resource was expected but not found\")\n return resource" ]
[ "0.7069354", "0.65950155", "0.63619065", "0.62713015", "0.6215465", "0.620397", "0.61216813", "0.60930693", "0.6085507", "0.5970844", "0.59512824", "0.58540356", "0.58053327", "0.5800865", "0.5777375", "0.57626045", "0.57626045", "0.57626045", "0.57626045", "0.5752824", "0.5709954", "0.5694906", "0.5672997", "0.5650158", "0.5619895", "0.5608228", "0.5587043", "0.55516213", "0.5542972", "0.5537923" ]
0.75324243
0
Does the bot save images to imgs folder?
def test_postImages(self): # GIVEN the group chat has at least one image testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID']) testBot.run() #THEN the bot should save images from the group to the imgs folder self.assertTrue(len(os.listdir('./imgs')) > 0) #AND there should be at least one image in the folder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_save_images(self):\n save_file(self.quart.save_images, to_single_file=False)", "def save_imgs(self):\n print(\"Saving the images with required categories ...\")\n os.makedirs(self.imgs_dir, exist_ok=True)\n # Save the images into a local folder\n for im in tqdm(self.images):\n img_data = requests.get(im['coco_url']).content\n with open(os.path.join(self.imgs_dir, im['file_name']), 'wb') as handler:\n handler.write(img_data)", "def images_exist(self):\n pass", "def saveImages(self):\n if self.imageLink and not self.image:\n f= open('imageLink.txt','wb')\n f.write(bytes(str(self.imageLink), 'UTF-8'))\n f.close()\n req = urllib3.urlopen(self.imageLink, headers={'User-Agent': 'Mozilla/5.0'})\n result = urllib3.urlopen(req)\n self.image.save(os.path.basename(self.imageLink),ContentFile(result.read()))\n print (\"Image saved\")\n self.save()", "def _save_images(self, \n img_urls, \n data_path, \n address):\n\n try:\n # if address is invalid, discontinue the process\n if not address:\n return 0\n\n # this is the path we want the OS to come back\n # when it finishes the image saving tasks\n current_path = os.getcwd()\n os.chdir(data_path)\n \n # create a folder for the apartment if it doesn't\n # exist inside the section folder\n if not os.path.exists(address):\n os.mkdir(address)\n os.chdir(address)\n\n # write images inside the apartment folder\n for i, img_url in enumerate(img_urls):\n browser = self._browser\n browser.get(img_url)\n browser.save_screenshot(f'img{i}.jpg')\n \n os.chdir(current_path)\n return 1\n except:\n os.chdir(current_path)\n return 0", "def _save_images(self, \n img_urls, \n data_path, \n address):\n\n try:\n # if address is invalid, discontinue the process\n if not address:\n return 0\n\n # this is the path we want the OS to come back\n # when it finishes the image saving tasks\n current_path = os.getcwd()\n os.chdir(data_path)\n \n # create a folder for the apartment if it doesn't\n # exist inside the section folder\n if not os.path.exists(address):\n os.mkdir(address)\n os.chdir(address)\n\n # write images inside the apartment folder\n for i, img_url in enumerate(img_urls):\n browser = self._browser\n browser.get(img_url)\n browser.save_screenshot(f'img{i}.jpg')\n \n os.chdir(current_path)\n return 1\n except:\n os.chdir(current_path)\n return 0", "def save_images(images, db, path):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n copy_files(files, path)", "def save_test_images(images):\n for description, img in images.items():\n save_to_image(img, description)\n save_to_netcdf(img, description)", "def _save_images(self, \n img_urls, \n data_path, \n address):\n\n try:\n # if address is invalid, discontinue the process\n if not address:\n return 0\n\n # this is the path we want the OS to come back\n # when it finishes the image saving tasks\n current_path = os.getcwd()\n os.chdir(data_path)\n \n # create a folder for the apartment if it doesn't\n # exist inside the section folder\n if not os.path.exists(address):\n os.mkdir(address)\n os.chdir(address)\n\n # write images inside the apartment folder\n for i, img_url in enumerate(img_urls):\n img_data = requests.get(img_url).content\n with open(f'img{i}.jpg', 'wb') as handler:\n handler.write(img_data)\n \n os.chdir(current_path)\n return 1\n except:\n os.chdir(current_path)\n return 0", "def save_processed_images(exp_dir, img_dict):\n # save them into a directory called \"processed\"\n img_fname = os.path.join(exp_dir, str(experiment) + '_processed.jpg')", "def 
save_images(self, step, images):\n\n # Save\n with self.summary_writer.as_default():\n for name, batch in images.items():\n image = batch[0]\n image = tf.expand_dims(image, axis=0)\n tf.summary.image(name, image, step)", "def getimgs():", "def save_output_image_to_directory(self):\n curr_directory = os.path.dirname(os.path.abspath(__file__))\n images_dir = curr_directory + \"/images/\"\n if not os.path.exists(images_dir):\n os.makedirs(images_dir)\n self.output_image_name = md5(str(uuid4()).encode()).hexdigest() + \".png\"\n image_file_name = images_dir + self.output_image_name\n self.output_image.save(image_file_name)\n logger.info(\"Image file saved locally : %s\", image_file_name)", "def test_save_jpg():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.jpg', 'data': [img]}\n\n assert images.save(parameters)", "def work_on_the_picture(self) -> None:\n self.folder_create(self.folder_config)\n value_image_used = os.path.join(self.folder_config, entrance_bot_img_name)\n if os.path.exists(value_image_used) and os.path.isfile(value_image_used):\n return value_image_used\n a = TelegramManager()\n try:\n value_img = self.produce_request(entrance_bot_img_link)\n if value_img.status_code == 200:\n with open(value_image_used, 'wb') as new_picture:\n for chunk in value_img:\n new_picture.write(chunk)\n return value_image_used\n a.proceed_message_values('Unfortunatelly, your link to the image is not working.')\n except Exception as e:\n a.proceed_message_values(f'We faced problem with the getting requests. Mistake: {e}')\n return ''", "def save_images(path, images, filenames):\n if not os.path.exists(path):\n return False\n for i in range(len(images)):\n img_rgb = cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)\n cv2.imwrite(os.path.join(path, filenames[i]), img_rgb)\n return True", "def maybe_save_images(images, filenames):\n\n if FLAGS.output_dir is not None:\n batch_size = images.shape[0]\n for i in xrange(batch_size):\n image_array = images[i, :, :]\n file_path = os.path.join(FLAGS.output_dir, filenames[i])\n image = Image.fromarray(np.uint8(image_array))\n image.save(file_path)", "def save_images(PATH, show_img, datasets, from_dataset):\n dataset = datasets[from_dataset]\n imgModels = dataset['models']\n for modelname, model in imgModels.items():\n print('save', modelname)\n plt.imshow(model[70])\n plt.set_cmap(\"gray\")\n plt.axis('off')\n plt.savefig(PATH + '/' + from_dataset + '_' + modelname + '.png', dpi=400)\n\n if show_img == True:\n plt.show()", "def pics(path):\n if app.debug:\n return flask.send_from_directory(config['pics_dir'], path)", "def save_step_4(imgs, output_path=\"./output/step4\"):\n # ... 
your code here ...\n cv2.imwrite(output_path+\"/output.jpg\", imgs)", "def check_png_directories(self):\n check_dir_of = Locations.check_dir_of\n check_dir_of(self.HISTO_PNG)\n check_dir_of(self.LABELS_PNG)\n check_dir_of(self.SOURCE_PNG)", "def save_image(self):\n img = self.driver.find_element_by_xpath(web_map[self.region][img_path]).get_attribute(\"src\")\n img = requests.get(img, stream=True)\n self.search_results.export_image(self.region, img)", "def test_save_fail():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'foo.bar', 'data': [img]}\n\n images.save(parameters)", "def _dump_image(self):\n if not self._current_id == len(self._img_ids):\n warnings.warn(\n 'Recorded {} out of {} validation images, incomplete results'.format(\n self._current_id, len(self._img_ids)))\n try:\n for im_name, im in self._panoptic_images.items():\n cv2.imwrite(osp.join(self._save_imgpath, im_name), im)\n except IOError as e:\n raise RuntimeError(\"Unable to dump images, ignored. What(): {}\".format(str(e)))", "def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")", "def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)", "def test_save_png():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.png', 'data': [img]}\n\n assert images.save(parameters)", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n img = np.round(((images[i] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n Image.fromarray(img).save(f, format='PNG')", "def test_save_image(self):\n\n from m3_save_images.m3_save_images import save_images\n folder_destination_name = \"unittest-sorted-images\"\n path_source = \"../img\"\n image_name = [\"00ff00.png\", \"aqua.png\", \"black.jpg\", \"yellow.png\", \"red2.jpg\", \"green.jpg\"]\n image_color = [\"Lime\", \"Aqua\", \"Black\", \"Yellow\", \"Red\", \"Green\"]\n # new empty folder is needed for testing save_image() function\n if os.path.isdir(folder_destination_name):\n shutil.rmtree(folder_destination_name)\n os.mkdir(folder_destination_name)\n # creating folders\n for i in range(0, 4):\n save_images(folder_destination_name, path_source, image_name[i], image_color[i])\n self.assertEqual(''.join(os.listdir(os.path.join(folder_destination_name, image_color[i]))), image_name[i])\n save_images(folder_destination_name, path_source, image_name[i], image_color[5])\n self.assertNotEqual(''.join(os.listdir(os.path.join(folder_destination_name, image_color[i]))), image_name[5])", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n 
else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)" ]
[ "0.7295236", "0.70829517", "0.6849836", "0.6494285", "0.64765847", "0.64765847", "0.6462699", "0.64444625", "0.6435254", "0.6420794", "0.6361256", "0.6350175", "0.63492787", "0.6334107", "0.62305295", "0.6226776", "0.6194464", "0.6176622", "0.6167687", "0.6155911", "0.6128892", "0.6114219", "0.610669", "0.6105437", "0.6088923", "0.60799336", "0.60736656", "0.60733443", "0.60696995", "0.6067688" ]
0.7626312
0
Does the bot retrieve a list of images?
def test_getImages(self): # GIVEN the group chat has at least one image testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID']) imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_images():\n return json_response(list_manifests())", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def list_images(self):\n raise NotImplementedError()", "def getimgs():", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def fetch_images(client, images):\n return [fetch_image(client, image) for image in images]", "async def image(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(f\"List of images: {str(self.image_array)}\")", "def list_images():\n image_map = build_image_map()\n click.echo('')\n click.echo('List of available images (Name - Description)')\n click.echo('')\n for name in image_map:\n click.echo('{} -> {}'.format(name, image_map[name]))", "async def fetch_all_images(sess: Session = Depends(get_db)):\n image_list = utils_com.get_com_image_list(sess)\n return image_list", "def images(self):\n return self._data[\"images\"]", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def get_image_list(self, account):\n images = self.driver(account).list_images()\n return [image.name for image in images]", "async def images(self, ctx, *, query: str=None):\n # Handle empty query\n if query is None:\n return await ctx.error('Please provide a query!')\n\n # Using these specific headers and \"lnms\" as source, will provide divs with \"rg_meta\" classes,\n # The modern image search page being JS rendered, data in these divs are jsons with raw image URLs\n # Old image search pages, only have thumbnails and a direct link to websites\n params = {'q': quote_plus(query), 'source': 'lmns', 'tbm': 'isch'}\n async with self.aiohttp_session.get(self.url, params=params, headers=self.image_headers) as r:\n html = await r.text()\n\n # Healthy\n soup = BeautifulSoup(html, 'lxml')\n\n # Go over 4 items, json.loads the item text, and grab \"ou\" probably stands for \"original url\"\n images = []\n for i, item in enumerate(soup.select('div.rg_meta')[:4]):\n js = json.loads(item.text)\n images.append((f\"{i+1}. 
{js['st']} - {js['s']}\", js[\"ou\"]))\n newl = '\\n'\n await ctx.message.edit(content=f\"```py\\n{newl.join([x[0] for x in images])}\"\n f\"\\n# Choose the appropriate number or type 0 to leave\\n```\")\n\n def check(m):\n return m.author == ctx.author and m.content.isdigit() and m.channel == ctx.channel\n message = await self.bot.wait_for('message', check=check)\n if message.content == \"0\":\n await message.delete()\n return await ctx.message.delete()\n choice = int(message.content) - 1\n await message.delete()\n await ctx.message.edit(content=images[choice][1])", "def test_list_image(self):\n pass", "def get_images(self):\n \n return self.img_lst", "def get_images(self):\n return self._get_brains(\"Image\")", "def images(self):\n return self.gameimage_set.all()", "def get_available_images():\n return AVAILABLE_IMAGES", "def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)", "def populateImagesList(self):\n \n self._gui_server.getImagesList(self._populateImagesList)", "def get_images():\n return _IMAGES", "def get_images(self, page_number):", "def image_list(self):\n return self._image_list", "def test_list_image_metadata(self):\n pass", "def images(self, **kwargs):\n\n path = self._get_movie_id_path('images')\n resp = self._get_method(path, kwargs)\n return resp", "def images(self, **kwargs):\n return self.get_list(self.cloudman.compute.images(),\n kind=\"image\")", "def get_all_images(self):\n self.roses.save_image()\n all_images = Images.get_all_images()\n self.assertTrue(len(all_images)<1)", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "async def img(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return image url\"\"\"\n link_list = []\n\n url = \"http://imgur.com/search?q=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for a in soup.find_all('a', href=True):\n if((a['href'][0:9]) == \"/gallery/\"):\n link_list.append(\"https://imgur.com/\" + a['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)" ]
[ "0.7578336", "0.7562665", "0.753567", "0.73441315", "0.728204", "0.725693", "0.72322696", "0.71710795", "0.7144439", "0.7026034", "0.69665307", "0.69051415", "0.69041944", "0.6901124", "0.6854555", "0.6846331", "0.676336", "0.6719417", "0.6709045", "0.6673847", "0.6651568", "0.6640355", "0.66228527", "0.66205806", "0.6617064", "0.6610545", "0.6608322", "0.66040754", "0.66006964", "0.65602213" ]
0.75715417
1
Load the dictionary from a file. The file must be whitespace-separated words. Creates the data structures.
def load(filename): print "Loading dictionary..." dictionary = Dictionary() print " Loading file..." whole_file = file(filename).read().upper() print " Splitting file..." words = whole_file.split() print " Removing unsuitable words..." words = dictionary.remove_unsuitable_words(words) print " Building data structures..." dictionary.set_words(words) print " Loaded %d words" % len(dictionary.words) print " Unique letter size:" print " No blanks: %d" % len(dictionary.letters_map) print " One blank: %d" % len(dictionary.letters_map_one_blank) print " Two blanks: %d" % len(dictionary.letters_map_two_blanks) return dictionary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_dictionary():\n\tglobal dictionary\n\twith open(FILE, \"r\") as f:\n\t\tfor words in f:\n\t\t\tdictionary += words.split()", "def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def load_dictionary(filename):\n\n word_list = []\n freq_sum = 0\n\n # nacitanie zo suboru\n with open(filename) as f:\n for line in f:\n freq, val = line.split()\n word_list.append(Word(int(freq), val))\n freq_sum += int(freq)\n\n # lexikograficke usporiadanie slov\n word_list_sorted = sorted(word_list, key=operator.attrgetter('value'))\n\n return word_list_sorted, freq_sum", "def _parse_word_dict(dict_f_name):\n if os.path.isfile(dict_f_name):\n word_dict = {}\n with open(dict_f_name) as f:\n for line in f:\n w, idx = line.strip().split(\" \")\n word_dict[w.strip()] = int(idx)\n return word_dict\n else:\n error(\"Dict not exists: %s\" % dict_f_name)", "def read_dictionary():\n with open(FILE, 'r') as f:\n for vocabulary in f:\n if vocabulary[0].strip() not in dict_txt:\n dict_txt[vocabulary[0].strip()] = [vocabulary.strip()]\n else:\n dict_txt[vocabulary[0].strip()].append(vocabulary.strip())", "def read_dictionary():\n global dic\n with open(FILE, 'r') as f:\n for line in f:\n word_list = line.split()\n word = word_list[0].strip()\n dic.append(word)", "def make_word_dict():\n d = dict()\n for line in open('words.txt'):\n word = line.strip().lower()\n d[word] = None\n\n return d", "def read_dictionary():\n with open(FILE, 'r') as f:\n for line in f:\n words_lst = line.split()\n for word in words_lst:\n dict_list.append(word)", "def load_vocab(filename):\n try:\n d = dict()\n with open(filename, encoding='utf-8') as f:\n for idx, word in enumerate(f):\n word = word.strip()\n d[word] = idx\n\n except IOError:\n raise MyIOError(filename)\n return d", "def word_dict():\n fin = open('words.txt')\n w_dict = {}\n for line in fin:\n word = line.strip()\n w_dict[word] = word\n return w_dict", "def LoadDictFile(file,dict_,cast_type):\n\twith open(file,'r') as f:\n\t\tfor line in f:\n\t\t\tline = line.rstrip()\n\t\t\tlst = line.split('=')\n\t\t\tdict_[cast_type(lst[1])] = lst[0]", "def __init__ (self, languageFilename):\n if not isinstance(languageFilename, str): # Checks if the filename is entered as a string.\n raise TypeError('The filename must be a string')\n self._words = set()\n try:\n with open(languageFilename) as data:\n line = data.readline()\n while line:\n line = line.rstrip()\n self._words.add(line)\n line = data.readline()\n except IOError:\n print('Please specify the correct name for the dictionary')", "def fill_in_dict():\n # assign a 'data' list from the txt file\n data = open('words.txt')\n # assign an empty 'my_dict' dictionary\n my_dict = dict()\n\n for word in data:\n # fill in dictionarys wit a keys and empty 
values\n my_dict[word] = ''\n return(my_dict)", "def read_dict():\n\n\tfilename = 'diction10k.txt'\n\t\n\ttry:\n\t\ttarget = open(filename, 'r')\n\n\texcept:\n\t\tprint(\"Dictionary not found. Please make sure it is located in the same\" \n\t\t\t+ \" folder as strings.py\")\n\t\tsys.exit(1)\n\n\tfor line in target:\n\t\tDICTSET.add(line.strip())", "def read_dictionary(filename='/Users/Paul/Documents/c06d.txt'):\n d = dict()\n fin = open(filename)\n for line in fin:\n\n # skip over the comments\n if line[0] == '#': continue\n\n t = line.split()\n word = t[0].lower()\n pron = ' '.join(t[1:])\n d[word] = pron\n\n return d", "def load():\n for line in open(config.filepath, 'r'):\n line = line.strip()\n line_sorted = ''.join(sorted(line))\n\n if line_sorted not in Words.hashed:\n Words.hashed[line_sorted] = []\n\n # Store the real hashed as a list\n # We need line_sorted as the key for fast lookup later\n Words.hashed[line_sorted].append(line)\n\n # Also add the word to a standard list\n # We'll use this to quickly determine wordiness later\n Words.words.append(line)", "def load_dict_from_txt_file(path, key_type=str, value_type=str):\n with txt_file_reader(path) as txt_file:\n return {key_type(key): value_type(value) for key, value in [line.strip().split('\\t') for line in txt_file]}", "def LoadWords(self,FileName) :\r\n\t\ttry :\r\n\t\t\twith open(FileName,'r') as fhan :\r\n\t\t\t\tWords = fhan.read()\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to read file %s: %s\"%(FileName,detail))\r\n\t\ttry :\r\n\t\t\tWordList = Words.rstrip().split('\\n')\r\n\t\t\tWordList = filter(None,WordList)\r\n\t\t\tWordList = [(Word,) for Word in WordList]\r\n\t\t\tDictRef = self.CreateDict(FileName)\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectDictTable'],(DictRef,))\r\n\t\t\tDictName = self.DB_Cursor.fetchone()[0]\r\n\t\t\tself.DB_Cursor.executemany(self.SQLCMDs['InsertAllWordsToDict']%DictName,WordList)\r\n\t\t\tself.DB_Connect.commit()\r\n\t\t\tlist_id = self.CreateWordList(FileName,DictRef)\r\n\t\t\tself.UpdateWordList(list_id,False)\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to add words to the new dictionary: %s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn DictRef", "def load_words():\n with open(DICTIONARY) as f:\n return [line.strip() for line in f]", "def read_dictionary_from_file(self, stem_flag):\n file_name = \"/dictionary.txt\" if not stem_flag else \"/dictionaryWithStemming.txt\"\n with open(self.posting_and_dictionary_path + file_name, \"r\") as f:\n txt = f.readlines()\n for line in txt:\n l = line.split(\":\")\n pos = l[1].split(\",\")\n e = DictionaryElement(pos[0])\n e.pointer = int(pos[1])\n e.corpus_tf = int(pos[2])\n if not stem_flag:\n self.term_dictionary[l[0]] = e\n else:\n self.term_dictionary_with_stemming[l[0]] = e\n f.close()", "def load(self, file_name):\n try:\n [self.add_word(w) for w in open(file_name).read().splitlines()]\n except IOError as e:\n print(e)", "def read_file(filename):\n print(\"Reading dictionary: \" +filename)\n word_dict = set()\n\n dictionary = open(filename)\n\n # Read each word from the dictionary\n for word in dictionary:\n # Remove the trailing newline character\n word = word.rstrip('\\n')\n\n # Convert to lowercase\n word = word.lower()\n\n word_dict.add(word)\n\n dictionary.close()\n\n return word_dict", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n 
#also, the empty string is considered a word.\n for letter in ['a', 'i', '']:\n d[letter] = letter\n return d", "def load_dict(dict_path):\n result_dict = {}\n for idx, line in enumerate(io.open(dict_path, \"r\", encoding='utf8')):\n terms = line.strip(\"\\n\")\n result_dict[idx] = terms\n return result_dict", "def load_from_file(self, filename):\n # clear datastore mape\n self._datastoreMap = {}\n # citanje filea\n with open(filename, 'rb') as f:\n binstr = f.read()\n inMap = pickle.loads(binstr)\n # za svaki kanal moramo dodati element u _datastoreMap\n for kanal in inMap:\n # stvaramo instancu Datastore\n self._datastoreMap[kanal] = DataStore()\n # instanca Datastore zna se otpakirati iz mape (dictionary)\n self._datastoreMap[kanal].dict2store(inMap[kanal])", "def load_words():\n # Load all the words from the scrabble dictionary into a python list, words\n fname = 'words.txt'\n with open(fname) as fh:\n words = fh.readlines()\n \n # Create a python dict keyed by sorted letters, with value equal to a list\n # of all the anagrams of that collection of letters\n anagram_tbl = dict()\n for word in words:\n word_lc = word.rstrip().lower()\n key = word_key(word_lc)\n value = anagram_tbl.get(key, []) + [word_lc]\n anagram_tbl[key] = value\n return anagram_tbl", "def load_to_dict(filename):\n dictionary = dict()\n with open(filename) as ff:\n for line in ff:\n (key, val) = line.split()\n dictionary[key] = val\n return dictionary", "def load_dictionary(cls, args, filename, source=True):\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol(\"<mask>\")\n return dictionary", "def load_vocab(vocab_file):\r\n vocab = {}\r\n index = 0\r\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\r\n while True:\r\n token = reader.readline()\r\n if not token:\r\n break\r\n token = token.strip()\r\n vocab[token] = index\r\n index += 1\r\n return vocab" ]
[ "0.7021809", "0.6869227", "0.68494225", "0.6806351", "0.6769147", "0.67662203", "0.673899", "0.67387104", "0.67215824", "0.6672152", "0.6645686", "0.664437", "0.66190505", "0.6611564", "0.65786475", "0.6577035", "0.65754235", "0.6568509", "0.6560537", "0.6529585", "0.65052617", "0.6482895", "0.6467978", "0.6463414", "0.64450324", "0.64213747", "0.6418799", "0.64059985", "0.6399918", "0.6385222" ]
0.7865121
0
Remove words that can't be used in Scrabble, such as those with hyphens and those larger than the size of the board.
def remove_unsuitable_words(words): max_length = Board.SIZE return [word for word in words if word and "-" not in word and len(word) <= max_length]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filterPossibleWords(self): \r\n filledInSpaces = []\r\n for i in range(len(self.currentBoard)):\r\n if self.currentBoard[i] != '_':\r\n filledInSpaces.append( (i, self.currentBoard[i]) )\r\n \r\n self.wordList = list(filter(lambda word: self.viableWord(word, filledInSpaces), self.wordList))", "def remove_spurious_words(text):\n spurious_words = [\"Cached\", \"Similar\", '的']\n for word in spurious_words:\n text = text.replace(word, \"\")\n return re.sub('[.、”“::a-zA-Z%?=()()—「 /-]', ' ', text)", "def remove_longer_words(text):\n return \" \".join([word for word in str(text).split() if len(word) <= 12])", "def remove_rarewords(text):\n return \" \".join([word for word in str(text).split() if word not in RAREWORDS])", "def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word", "def clean_word(word):\n return \"\".join([c for c in word.lower() if ord(c) < 128])", "def sanitize(wl):\n s = []\n for word in wl:\n for symbol in ['.', '!', ',', '\\n', '\\r', '?']:\n if symbol in word:\n s.append(symbol)\n word = word.replace(symbol, '')\n \n s.append(word)\n return s", "def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertain',\n 'estimate',\n 'link',\n 'and',\n 'more',\n 'fetch',\n 'be',\n 'there',\n 'do',\n 'you',\n 'have',\n 'any',\n 'is',\n 'my',\n 'on',\n 'can',\n 'i',\n 'get',\n 'some',\n 'am',\n 'look',\n 'for',\n 'the',\n 'to',\n 'share',\n 'me',\n 'of',\n 'please',\n 'a',\n 'very',\n 'at',\n 'with',\n 'relate',\n 'sorry'\n ]]\n return ' '.join(word)", "def trim_rule(word, count, min_count):\n if ((word[0] not in string.ascii_uppercase + string.ascii_lowercase) or\n (word in set(stopwords.words('english')))):\n return utils.RULE_DISCARD", "def checkWords(line):\n\n words = []\n parts = re.sub('[^a-zA-Z0-9@ ]', '', line)\n parts = parts.lower()\n parts = parts.split(' ')\n for w in parts:\n if w is not '' and len(w) > 4 and len(w) < 15 and w not in commonWords:\n # if w is not '':\n words.append(w)\n\n return words", "def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return text\n return text", "def filterWords(text):\n words = text.split()\n out = []\n for w in words:\n w = normalizeOrRemoveWord(w)\n if w != None:\n out.append(w)\n return out", "def _clean_words(self, title, filter_stopwords=False):\n chars = '\"[]():;?!,\\'-'\n translation = dict((ord(c), u' ') for c in chars)\n def translate(text):\n if isinstance(text, unicode):\n translated = text.translate(translation)\n else:\n translated = text.translate(None, chars)\n return translated\n strips = '.'\n words = [\n x.strip(strips)\n for x in translate(title).split()\n ]\n for word in words:\n if len(word) >= self.min_word_length:\n if filter_stopwords and word.lower() not in STOPWORDS:\n continue\n # if the word contains non-ascii characters, try to convert\n # 
it to a ascii equivalent so that it's possible to type\n # \"naive\" when you don't even know how to type \"naïve\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n # it contains non-ascii characters\n ascii_word = unidecode(word)\n yield unicode(ascii_word).lower()\n yield word.lower()\n # yield ''.join(c for c in word if c.isalnum())", "def clean_spoken(words, key_fn=None):\n if key_fn is None:\n key_fn = lambda x: x\n\n # remove disfluencies etc.\n words = (w for w in words if key_fn(w) not in BLACKLIST)\n\n # Remove x+, (xy)+, (xyz)+\n for i in range(1, 4):\n words = remove_duplicate_sequences(words, i, key_fn=key_fn)\n\n return list(words)", "def clean_words(words):\n return [clean_word(w) for w in words if len(clean_word(w)) >= 3]", "def noise_removal(text):\n # Remove HTML\n text = BeautifulSoup(text, \"html.parser\").get_text()\n\n # Remove non-letters\n text = re.sub(\"[^a-zA-Z]\", \" \", text)\n\n # remove letters that are used more than three times in a row\n # sources: https://en.oxforddictionaries.com/explore/words-with-same-letter-three-times-in-a-row/\n # https://stackoverflow.com/questions/4574509/remove-duplicate-chars-using-regex\n text = re.sub(r'([\\w])\\1{2,}', r'\\1', text)\n\n\n word_list = text.split()\n return word_list", "def scrub_words(text):\n \"\"\"Taken from https://github.com/kavgan/nlp-in-practice/blob/master/text-pre-processing/Text%20Preprocessing%20Examples.ipynb \"\"\"\n \n # remove html markup\n text=re.sub(\"(<.*?>)\",\"\",text)\n \n #remove non-ascii and digits\n text=re.sub(\"(\\\\W|\\\\d)\",\" \",text)\n \n # remove the extra spaces that we have so that it is easier for our split :) Taken from https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python\n text=re.sub(' +', ' ', text).strip()\n return text", "def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )", "def remove_morethan2letters(text):\n words = text.split();\n n = len(words)\n for i in range(0,n):\n words[i] = util_func(words[i])\n \n return \" \".join(words)", "def purge_words(self):\n\n\t\tword_list = self.transcript_string.encode('utf-8').split()\n\t\tpurged_word_list = {}\n\t\tfor word in word_list:\n\t\t\tif word.isalpha():\n\t\t\t\tif word.islower():\n\t\t\t\t\tpurged_word_list.setdefault(word, []).append(word)\n\t\t\t\telse:\n\t\t\t\t\tlower_word = word.lower()\n\t\t\t\t\tpurged_word_list.setdefault(lower_word, []).append(word) \n\t\t\telse:\n\t\t\t\tcontinue \n\t\t\n\t\tself.word_list = purged_word_list", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text", "def removeNonDictionaryWords(self, words):\n\t\twordList = [w.strip() for w in words.split(' ')]\n\t\trtnWords = []\n\t\tfor word in wordList:\n\t\t\tif word.lower() in 
self.dictionary:\n\t\t\t\trtnWords.append(word)\n\t\treturn \" \".join(rtnWords)", "def removeExtraSpaces(self, words):\n\t\treturn re.sub(r'\\s+', ' ', words.strip()).strip()", "def clean_text(s,stem=False):\n\tret = s.lower()\n\tret = re.sub(r'[^a-z ]',' ',ret)\n\tret = re.sub(r' +',' ',ret).strip()\n\tret = re.sub(r'see more occupations related to this (activity|skill|task)','',ret)\n\tif stem:\n\t\tret = ' '.join( stemmer.stem(word) for word in ret.split(' ') )\n\treturn ret", "def clean_non_word_chars(tokens):\n toks = []\n for token in tokens:\n t = re.sub(r'\\W', \"\", token)\n if len(t) > 1:\n toks.append(t)\n\n return toks", "def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()", "def strip_clean_stem_description(text, stemmer):\n # no damn hyphens!\n text = text.strip().replace(\"-\",\" \")\n\n kill_nonalpha = re.compile(\"[\\W]+\")\n # need to get rid of any stray \"s\" from killing apostrophes\n text = kill_nonalpha.sub(\" \", text).replace(\" s \", \" \").strip()\n\n reduce_whitespace = re.compile(\"[\\s{2,}]+\")\n text = reduce_whitespace.sub(\" \", text)\n # reduce text to nigh-unintelligible word stems to account for tenses and\n # word pluralities. Looking to search, not be pretty.\n text = \" \".join([stemmer.stem(word) for word in text.split(\" \")])\n\n return text", "def clean_unnecessary_whitespaces(self, tweet):\n tweet = ' '.join(tweet.split())\n\n return tweet", "def text_process(mess):\n nopunc= [char for char in mess if char not in string.punctuation]\n nopunc=''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english') and len(word)>2]" ]
[ "0.7085279", "0.6904262", "0.69013506", "0.6823802", "0.6685156", "0.65086526", "0.64731383", "0.6419102", "0.6413617", "0.6413319", "0.64127964", "0.6403451", "0.63925976", "0.6373155", "0.6363011", "0.63588434", "0.63422495", "0.6331452", "0.6307014", "0.62975794", "0.6282578", "0.6272541", "0.62724954", "0.62681043", "0.62392455", "0.6236636", "0.62356675", "0.6219246", "0.62053925", "0.6203489" ]
0.785968
0
Generate the maps from the used letters to the list of words.
def generate_letter_maps(self): word_count = len(self.words) last_percent = 0 # Do no-blank words. for i, word in enumerate(self.words): letters = "".join(sorted(set(word))) self.letters_map[letters].append(word) # Do one-blank words. for subword in self.remove_one_letter(letters): self.letters_map_one_blank[subword].append(word) # Do two-blank words. for subword in self.remove_two_letters(letters): self.letters_map_two_blanks[subword].append(word) # Show progress information. percent = int(i*100/word_count) if percent/10 != last_percent/10: print " %d%%" % percent last_percent = percent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_map():\n known_mappings = {\"a zoo\": \"y qee\",\n \"our language is impossible to understand\": \"ejp mysljylc kd kxveddknmc re jsicpdrysi\",\n \"there are twenty six factorial possibilities\": \"rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd\",\n \"so it is okay if you want to just give up\": \"de kr kd eoya kw aej tysr re ujdr lkgc jv\",\n }\n all_letters = \"abcdefghijklmnopqrstuvwxyz\"\n letter_map = {}\n for english, googlerese in known_mappings.items():\n pairs = zip(english, googlerese)\n for e,g in pairs:\n if e not in letter_map:\n letter_map[e] = g\n if len(letter_map) == 26:\n e_letter = \"\"\n g_letter = \"\"\n for letter in all_letters:\n if not e_letter and letter not in letter_map.keys():\n e_letter = letter\n if not g_letter and letter not in letter_map.values():\n g_letter = letter\n letter_map[e_letter] = g_letter\n return \"\".join(letter_map.keys()), \"\".join(letter_map.values())", "def map_word(word, charmap):\n return [charmap[c] for c in word]", "def generate_words(word_size, letters):\r\n # Read file from dictionary text\r\n # Make a DICTIONARY base on word size:\r\n f = open('English.txt')\r\n dictionary = dict()\r\n for line in f:\r\n line = line.strip('\\n') #get rid of \\n at the end of line\r\n if dictionary.has_key(len(line)): \r\n dictionary[len(line)].append(line)\r\n else:\r\n dictionary[len(line)] = [line] #the line in [] is important\r\n\r\n # Go through the DICTIONARY with same word size\r\n # Check if the word has the characters in letter list:\r\n possible_words = []\r\n for word in dictionary[word_size]:\r\n for letter in word:\r\n if letter not in letters:\r\n correct = False\r\n break\r\n else: \r\n correct = True \r\n if (correct):\r\n possible_words.append(word) \r\n return possible_words", "def word2vec_mapping_func():\n return {\"belonging to\": \"belonging\", \"parked on\": \"parked\", \"growing on\": \"growing\", \"standing on\": \"standing\",\n \"made of\": \"made\", \"attached to\": \"attached\", \"hanging from\": \"hanging\", \"in front of\": \"front\",\n \"lying on\": \"lying\", \"flying in\": \"flying\", \"looking at\": \"looking\", \"on back of\": \"back\",\n \"laying on\": \"laying\", \"walking on\": \"walking\", \"walking in\": \"walking\", \"sitting on\": \"sitting\",\n \"covered in\": \"covered\", \"part of\": \"part\", \"painted on\": \"painted\", \"mounted on\": \"mounted\"}", "def get_letter_to_code_mappings():\n return {\n \"a\": \"Alfa\", \"b\": \"Bravo\", \"c\": \"Charlie\", \"d\": \"Delta\", \"e\": \"Echo\",\n \"f\": \"Foxtrot\", \"g\": \"Golf\", \"h\": \"Hotel\", \"i\": \"India\", \"j\":\n \"Juliett\", \"k\": \"Kilo\", \"l\": \"Lima\", \"m\": \"Mike\", \"n\": \"November\", \"o\":\n \"Oscar\", \"p\": \"Papa\", \"q\": \"Quebec\", \"r\": \"Romeo\", \"s\": \"Sierra\", \"t\":\n \"Tango\", \"u\": \"Uniform\", \"v\": \"Victor\", \"w\": \"Whiskey\", \"x\": \"Xray\",\n \"y\": \"Yankee\", \"z\": \"Zulu\", \"0\": \"Zero\", \"1\": \"One\", \"2\": \"Two\", \"3\":\n \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\", \"7\": \"Seven\", \"8\":\n \"Eight\", \"9\": \"Niner\", \"=\": \"Equals\", \"?\": \"Query\", \"/\": \"Slash\", \",\":\n \"Comma\", \".\": \"Stop\", \":\": \"Colon\", \"'\": \"Apostrophe\", \"-\": \"Dash\",\n \"(\": \"Open\", \")\": \"Close\", \"@\": \"At\",\n }", "def mapping_letter(letters):\n my_list = list(map(lambda x: x.upper(), letters))\n return dict(zip(letters, my_list))", "def buildDict(self, words):\r\n for word in words:\r\n self.trie.addWord(word)", "def build_dict(word_list):\r\n\r\n # 
initialize a dictonary\r\n d = dict()\r\n\r\n # iterate through the word_list, mapping sorted letters to word\r\n for i in word_list:\r\n\r\n # key - sorted letters in the word\r\n # how to sort ? --> convert to list, then sort. Finally join the sorted list.\r\n key = ''.join(sorted(list(i)))\r\n\r\n # check if sorted letters avaialble in dict,\r\n # if yes - append the word to the value\r\n # else - put the word as the 0th element of the value list\r\n if key in d:\r\n d[key].append(i)\r\n else:\r\n d[key] = [i]\r\n\r\n return d", "def char_mapping(sentences, lower):\n chars = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(chars)\n dico[\"<PAD>\"] = 10000001\n dico['<UNK>'] = 10000000\n char_to_id, id_to_char = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in chars)\n ))\n return dico, char_to_id, id_to_char", "def anagrams(word_list):\n output = dict()\n\n for word in word_list:\n word = word.strip()\n letters = word_to_tuple(word)\n # add letters as key to output dict\n # if not present already\n output[letters] = output.get(letters, [])\n # append word to list at key\n output[letters].append(word)\n\n return output", "def words_uses_only(letters):\n\treturn {w for w in word_set if uses_only(w, letters)}", "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n dico['<UNK>'] = 10000000\n word_to_id, id_to_word = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words))\n )\n return dico, word_to_id, id_to_word", "def _gen_word_dict(words):\n\n # grab all of them with a single in statement.\n results = lookup_words_by_words(\n (set([self.normalize(w) for w in words] + words)),\n session, Word)\n\n def merge(word):\n if word in results:\n return\n # hopefully these are unique.\n # results[word] = unique_merge(session, Word, word=word)\n results[word] = session.merge(Word(word=word))\n\n for word in words:\n merge(word)\n merge(self.normalize(word))\n\n return results", "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n\n dico['<PAD>'] = 10000001\n dico['<UNK>'] = 10000000\n dico = {k:v for k,v in dico.items() if v>=3}\n word_to_id, id_to_word = create_mapping(dico)\n\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words)\n ))\n return dico, word_to_id, id_to_word", "def generate_words(combo,scrabble_words_dict):\n word_set = set()\n for w in itertools.permutations(combo):\n word = ''.join(w)\n if word in scrabble_words_dict:\n word_set.add(word)\n return word_set", "def test_map(data):\n k, v = data\n for s in split_into_sentences(v):\n for w in split_into_words(s.lower()):\n yield (w, \"\")", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n #also, the empty string is considered a word.\n for letter in ['a', 'i', '']:\n d[letter] = letter\n return d", "def word_map(text):\n\n # Replace puncation with words\n s = text.replace('.', \" :period:\")\n s = s.replace('\\n', \"\")\n s = s.replace('\"', \" :quote:\")\n s = s.replace(',', \" :comma:\")\n s = s.replace('?', \" :quest:\")\n\n words = sorted(set(s.split(\" \")))\n\n n_to_word = {}\n word_to_n = {}\n\n num = 0\n for word in words:\n 
n_to_word[num] = word\n word_to_n[word] = num\n num += 1\n\n return words, n_to_word, word_to_n", "def load_words():\n # Load all the words from the scrabble dictionary into a python list, words\n fname = 'words.txt'\n with open(fname) as fh:\n words = fh.readlines()\n \n # Create a python dict keyed by sorted letters, with value equal to a list\n # of all the anagrams of that collection of letters\n anagram_tbl = dict()\n for word in words:\n word_lc = word.rstrip().lower()\n key = word_key(word_lc)\n value = anagram_tbl.get(key, []) + [word_lc]\n anagram_tbl[key] = value\n return anagram_tbl", "def get_words(f, letters):\n # lettrs = []\n # okay = True\n # words = []\n # nline = ''\n # with open(f, 'r') as vocabulary:\n # for line in vocabulary.readlines():\n # nline = line.replace(\"\\n\", \"\").lower()\n # if 4 <= len(nline) <= 9 and letters[4] in nline:\n # lettrs = list(nline)\n # for lettr in lettrs:\n # if lettr not in letters:\n # okay = False\n # break\n # else:\n # okay = True\n # if okay is True:\n # words.append(nline)\n #\n # lettrs = copy.copy(letters)\n # nwords = []\n # okay = True\n # for word in words[::1]:\n # lettrs = copy.copy(letters)\n # for letter in word:\n # if letter in lettrs:\n # lettrs[lettrs.index(letter)] = '0'\n # else:\n # okay = False\n # break\n # if okay is True:\n # nwords.append(word)\n # okay = True\n #\n # unique = True\n # words = []\n # for word in nwords:\n # if nwords.count(word) > 1:\n # nwords.remove(word)\n # nwords.sort()\n # return nwords\n res = []\n cort_letters = []\n our_letters = []\n res = []\n f = open(f, 'r')\n for line in f:\n line = line.replace(\"\\n\", \"\").strip().lower()\n if 4 <= len(line) <= 9:\n if letters[4] in line:\n count = 0\n for each_letter in line:\n if each_letter in letters:\n count += 1\n if count == len(line):\n our_letters.append(line)\n f.close()\n for each_word in our_letters:\n count_let = 0\n for each_letter in each_word:\n if each_word.count(each_letter) <= letters.count(each_letter):\n count_let += 1\n if count_let == len(each_word):\n res.append(each_word)\n for each in res:\n if res.count(each) > 1:\n res.remove(each)\n return sorted(res)", "def anagrams(word_lst):\n words_dict = {}\n for word in word_lst:\n characters = ''.join(sorted(list(word)))\n if characters in words_dict:\n words_dict[characters].append(word)\n else:\n words_dict[characters] = [word]\n return words_dict", "def words(self):\n\t\treturn {c: sorted(l) for (c,l) in sorted(self.dictData.items())}", "def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls", "def create_word(char_list):", "def get_words(f: str, letters: List[str]) -> List[str]:\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n words_file = open(f)\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in words_file:\r\n word = word[:-1].lower()\r\n if len(word) >= 4:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def buildDict(self, words):\n self.dict = collections.defaultdict(set)\n for word in words:\n for i in xrange(len(word)):\n self.dict[word[:i] + '*' + word[i+1:]].add(word[i])", "def 
get_wordlists():\n\n\tCS = {'ACM', 'IEEE', 'Computer Science', 'Artificial Intelligence',\n\t\t'Pattern Recognition', 'Computer Vision', 'Machine Learning',\n\t\t'Signal Processing', 'Electrical Engineering', 'Image Processing',\n\t\t'Data Mining', 'Neural Networks', 'Computer Graphics', 'Graphics',\n\t\t'Language Processing', 'Internet', 'Intelligent Systems',\n\t\t'Robotic','Data','Software', 'Machine Vision', 'Image Analysis',\n\t\t'Scientific Computing', 'SIAM', 'Malware','World Wide Web', \n\t\t'Computational Intelligence', 'Computational Linguistics',\n\t\t'Computational linguistics','Algorithm','Computer','ITiCSE',\n\t\t'ITICSE','Machine learning','Learning','learning',\n\t\t'Artificial intelligence','CIVR','Document Analysis'}\n\n\tbio = {'Biology', 'Microbiology', 'Molecular', 'Medical', 'Biological',\n\t\t'Cancer', 'Genome', 'Bioinformatics', 'Protein', 'Biocomputing',\n\t\t'Biomedical', 'biology', 'Medicine', 'Biosystems', 'Virology',\n\t\t'Brain', 'Psychology', 'Genetics', 'Bioengineering', 'Cell',\n\t\t'Cardiology', 'Metabolic', 'Biotechnology', 'Pathogens',\n\t\t'Pathology', 'Plant', 'PLANT', 'Virus', 'Drug','Medicinal',\n\t\t'Neuro','Psych',\n\t\t'Genomic','Diseases','Endocrinology', 'Epidemiology',\n\t\t'Proteom','Biochem', 'DNA', 'Pharma', 'Biomedic', 'biomedica',\n\t\t'Neurobiological'}\n\n\tmath = {'Mathemati','Markov','Probability','Algebra','Network',\n\t\t'Topology','Optimization', 'Geometr','Statistic','Algorithm',\n\t\t'Graph ','Graphs','Combinatori','Riemann Surfaces','Permutation Groups',\n\t\t'Functional Analysis', 'SIAM','Fixed Point','Wavelet','Statistics',\n\t\t'Linear Regression','Fractal','geometry','Multivariate','Chaos',\n\t\t'mathemati','Kernel'}\n\n\tlinguistics = {}\n\n\tcomputer_vision = {}\n\n\tchemistry = {}\n\n\tphysics = {}\n\n\t# Rename \"Computer Vision\" to \"Image Processing\"?\n\ttopic_names = ['Computer Science','Biology','Mathematics','Chemistry',\n\t\t'Physics','Computer Vision','Natural Language Processing']\n\ttopics = [CS, bio, math]#, linguistics, computer_vision, chemistry, physics]\n\n\treturn {topic_names[i]:topics[i] for i in range(len(topics))}", "def generate_dictionary(location):\n f = open('../data/wordlist.txt', 'rb')\n words = Counter(re.findall('[a-z]+', f.read().lower().decode()))\n joblib.dump(words, location)", "def letterFreq(words):\n dict = {}\n total = 0\n for word in words:#Iterate through words\n for letter in word:#Increment by letter\n count = 0\n for yearCount in words[word]:\n count += yearCount.count#Increment total instances of word\n total += count#Count total letters\n if letter in dict:\n dict[letter] += count#Add to existing entry\n else:\n dict[letter] = count#Create new entry\n \"\"\"CODE FOR THE WHOLE ALPHABET\"\"\"\n list = []\n for letter in ascii_lowercase:\n if letter in dict and dict[letter] != 0:\n list.append(dict[letter] / total)#Convert to relative\n else:\n list.append(0.0)#Fill alphabet\n return list", "def generate_words_with_scores(rack,placed_tile,scrabble_words_dict):\n res_set = set()\n word_score_dict = {}\n comb_set = generate_combinations(rack,placed_tile)\n for combo in comb_set:\n words_set = generate_words(combo, scrabble_words_dict)\n for word in words_set:\n res_set.add(word)\n for word in res_set:\n score = calculate_score(rack,word)\n word_score_dict[word] = score\n return word_score_dict" ]
[ "0.76034564", "0.7033079", "0.68971705", "0.6837685", "0.68305516", "0.6805587", "0.6706723", "0.6682043", "0.6676078", "0.6675726", "0.6638174", "0.661801", "0.6579658", "0.65770775", "0.65488666", "0.65322185", "0.6516602", "0.64874303", "0.6452002", "0.64316815", "0.64184576", "0.6343061", "0.63259846", "0.63255495", "0.62985057", "0.62982726", "0.6287662", "0.62768215", "0.62536687", "0.62450373" ]
0.80057627
0
Returns a sequence of words from "word" with each letter missing.
def remove_one_letter(word): for i in range(len(word)): yield word[:i] + word[i + 1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def words_without_letter(l):\n\treturn {w for w in word_set if has_no_letter(w, l)}", "def remove_two_letters(word):\n for i in range(len(word) - 1):\n first_part = word[:i]\n for j in range(i + 1, len(word)):\n yield first_part + word[i + 1:j] + word[j + 1:]", "def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(word[1:]) + all_strings", "def gen_all_strings(word):\n if word == '':\n return ['']\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n all_words = []\n for string in rest_strings:\n for leter in range(len(string)+1):\n all_words.append(string[0:leter]+first+string[leter:])\n\n return rest_strings + all_words", "def cleanWord(word):\r\n newWord = [letter.lower() for letter in word if letter.isalpha()]\r\n return \"\".join(newWord)", "def get_words(line):\n try:\n alphabet = \"abcdefghijklmnopqrstuvwqyz \"\n lineL = list(line.lower())\n for word in lineL:\n if word not in alphabet:\n lineL.remove(word)\n lineS = \"\".join(lineL)\n lineL = lineS.split(\" \")\n for word in lineL:\n if word == \"\":\n lineL.remove(word)\n return lineL\n except:\n print(\"Error get_words()\")", "def extrachar(word: str) -> Iterator[str]:\n if len(word) < 2:\n return\n\n for i in range(0, len(word)):\n yield word[:i] + word[i+1:]", "def filterPossibleWords(self): \r\n filledInSpaces = []\r\n for i in range(len(self.currentBoard)):\r\n if self.currentBoard[i] != '_':\r\n filledInSpaces.append( (i, self.currentBoard[i]) )\r\n \r\n self.wordList = list(filter(lambda word: self.viableWord(word, filledInSpaces), self.wordList))", "def missingWords(s, t):\n missingWords = []\n \n new_t = t.split()\n new_s = s.split()\n\n for index, word in enumerate(new_s):\n if new_t[index] != word:\n missingWords.append(word)\n new_t[index] = word\n\n new_list = new_s - new_t\n\n return new_list", "def gen_all_strings(word):\r\n if len(word) == 0:\r\n return ['']\r\n else:\r\n first = word[0]\r\n rest = gen_all_strings(word[1:])\r\n new = []\r\n for item in rest:\r\n if len(item) > 0:\r\n for pos in range(len(item)):\r\n new.append(item[:pos] + first + item[pos:])\r\n new.append(item + first)\r\n new.append(first)\r\n new.extend(rest)\r\n return new", "def remove_unsuitable_words(words):\n\n max_length = Board.SIZE\n return [word for word in words if word and \"-\" not in word and len(word) <= max_length]", "def filterWords(text):\n words = text.split()\n out = []\n for w in words:\n w = normalizeOrRemoveWord(w)\n if w != None:\n out.append(w)\n return out", "def squeeze(word):\n return ''.join(x[0] for x in groupby(word))", "def string_letter_removal(word, letter):\n text_mod = \"\"\n for char in word:\n if char != letter:\n text_mod += char\n return text_mod", "def missingWords2(s, t):\n # missingWords = []\n \n new_s = s.split()\n # print(new_s)\n\n new_t = t.split()\n # print(new_t)\n\n missing = []\n\n while len(new_t) > 0:\n for word in new_s:\n if word not in new_t:\n missing.append(word)\n else:\n new_t.remove(word)\n\n return missing", "def remove_repeated_characters(word):\n def get_real_word(word):\n if wordnet.synsets(word):\n return word\n new_word = repeats.sub(match_sub, word)\n return get_real_word(new_word) if new_word != word else new_word\n repeats = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\n match_sub = r'\\1\\2\\3'\n return [get_real_word(word) for word in 
word]", "def ladder(word: str) -> List[str]:\n found_words = set()\n for i in range(len(word)):\n pattern = list(word)\n pattern[i] = '.'\n search_results = search(\"^\" + \"\".join(pattern) + \"$\")\n for result in search_results:\n if result != word:\n found_words.add(result)\n return found_words", "def generate_alphabet_from_word(word):\n word = \" \"+word+\" \"\n chars = [char for char in word] # Getting letters from the word\n chars += map(add, chars[:-1], chars[1:]) # Adding bigrams to the list\n\n # Computing hash of items and add 0 to the list\n return set([0] + [anagram_hash(c) for c in set(chars)])", "def full_words(word, string, sensitive=True):\n temp_word = ''\n o = []\n start = 0\n if not sensitive:\n word = word.lower()\n string = string.lower()\n for i, char in enumerate(string):\n if char != ' ':\n temp_word += char\n if i == 0:\n start = 0\n else:\n if string[i - 1] == ' ':\n start = i\n if i == len(string) - 1:\n if temp_word == word:\n o.append([start, start + len(word)])\n else:\n if temp_word == word:\n o.append([start, start + len(word)])\n temp_word = ''\n return o", "def clean_word(word):\n return \"\".join([c for c in word.lower() if ord(c) < 128])", "def filter_empty(word_list):\n new_list = []\n for x in word_list:\n if(x):\n new_list.append(x)\n return new_list", "def _get_replacement_words(self, word):\n\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 1 if self.skip_first_char else 0\n end_idx = (len(word) - 2) if self.skip_last_char else (len(word) - 1)\n\n if start_idx >= end_idx:\n return []\n\n if self.random_one:\n i = np.random.randint(start_idx, end_idx)\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n else:\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n\n return candidate_words", "def make_bag(txt, stopw):\n bow = re.split('\\s',txt.lower())\n new_bow=[]\n for word in bow:\n if word not in stopw and len(word)>0 and not re.search('\\d',word):\n new_bow.append(word)\n return(new_bow)", "def doubletwochars(word: str) -> Iterator[str]:\n\n if len(word) < 5:\n return\n\n # TODO: 1) for vacacation yields \"vacation\" twice, hunspell's algo kinda wiser\n # 2) maybe just use regexp?..\n for i in range(2, len(word)):\n if word[i-2] == word[i] and word[i-3] == word[i-1]:\n yield word[:i-1] + word[i+1:]", "def shuffle(word: str) -> List[str]:\n letters = list(word)\n while True:\n random.shuffle(letters)\n new_word = \"\".join(letters)\n if new_word != word:\n return letters", "def anagrams(word): \n if len(word) < 2:\n yield word\n else:\n for i, letter in enumerate(word):\n if not letter in word[:i]: #avoid duplicating earlier words\n for j in anagrams(word[:i]+word[i+1:]):\n yield j+letter", "def blank():\n return([word for word in decoded_words if sum([1 for char in word if char.isupper()==False]) == 1])", "def removeNonDictionaryWords(self, words):\n\t\twordList = [w.strip() for w in words.split(' ')]\n\t\trtnWords = []\n\t\tfor word in wordList:\n\t\t\tif word.lower() in self.dictionary:\n\t\t\t\trtnWords.append(word)\n\t\treturn \" \".join(rtnWords)", "def encode_word(word: str) -> List[str]:\n inner_letters = word[1:-1]\n inner_letters = shuffle(inner_letters)\n return [word[0], *inner_letters, word[-1]]", "def extract_characters(word):\n char_bbs = []\n column = 0\n char_start = -1\n while column < word.shape[1]:\n while not word[:, column].any():\n if char_start != 
-1:\n char_bbs.append(np.s_[:, char_start:column])\n char_start = -1\n column += 1\n if char_start == -1:\n char_start = column\n column += 1\n if char_start != -1:\n char_bbs.append(np.s_[:, char_start:column])\n return char_bbs" ]
[ "0.6977009", "0.6552175", "0.6551507", "0.63555896", "0.6343313", "0.62956893", "0.62667996", "0.62119746", "0.620326", "0.619742", "0.6173767", "0.61558497", "0.61534464", "0.61103296", "0.60804164", "0.6049091", "0.5966941", "0.5939155", "0.5914981", "0.5904095", "0.5892561", "0.58718675", "0.58385736", "0.58290845", "0.5825985", "0.5815231", "0.57720256", "0.5766631", "0.5755129", "0.5738441" ]
0.7108938
0
Returns a sequence of words from "word" with pairs of letters missing.
def remove_two_letters(word): for i in range(len(word) - 1): first_part = word[:i] for j in range(i + 1, len(word)): yield first_part + word[i + 1:j] + word[j + 1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extrachar(word: str) -> Iterator[str]:\n if len(word) < 2:\n return\n\n for i in range(0, len(word)):\n yield word[:i] + word[i+1:]", "def twowords(word: str) -> Iterator[List[str]]:\n\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]", "def doubletwochars(word: str) -> Iterator[str]:\n\n if len(word) < 5:\n return\n\n # TODO: 1) for vacacation yields \"vacation\" twice, hunspell's algo kinda wiser\n # 2) maybe just use regexp?..\n for i in range(2, len(word)):\n if word[i-2] == word[i] and word[i-3] == word[i-1]:\n yield word[:i-1] + word[i+1:]", "def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(word[1:]) + all_strings", "def gen_all_strings(word):\n if word == '':\n return ['']\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n all_words = []\n for string in rest_strings:\n for leter in range(len(string)+1):\n all_words.append(string[0:leter]+first+string[leter:])\n\n return rest_strings + all_words", "def remove_one_letter(word):\n for i in range(len(word)):\n yield word[:i] + word[i + 1:]", "def gen_all_strings(word):\r\n if len(word) == 0:\r\n return ['']\r\n else:\r\n first = word[0]\r\n rest = gen_all_strings(word[1:])\r\n new = []\r\n for item in rest:\r\n if len(item) > 0:\r\n for pos in range(len(item)):\r\n new.append(item[:pos] + first + item[pos:])\r\n new.append(item + first)\r\n new.append(first)\r\n new.extend(rest)\r\n return new", "def split_word(word):\n return [(word[:i], word[i:]) for i in range(len(word) + 1)]", "def get_pairs(word):\r\n pairs = set()\r\n prev_char = word[0]\r\n for char in word[1:]:\r\n pairs.add((prev_char, char))\r\n prev_char = char\r\n return pairs", "def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs", "def get_pairs(word):\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs", "def get_pairs(word):\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs", "def _get_replacement_words(self, word):\n\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 1 if self.skip_first_char else 0\n end_idx = (len(word) - 2) if self.skip_last_char else (len(word) - 1)\n\n if start_idx >= end_idx:\n return []\n\n if self.random_one:\n i = np.random.randint(start_idx, end_idx)\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n else:\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n\n return candidate_words", "def zip_letters(xl, yl, dxl, dyl, rl, word):\n return (\n ([pl.pop(0) if pl else None for pl in (xl, yl, dxl, dyl, rl)], char)\n for char in word)", "def subword_from_word(word, length=None):\n length = len(word) if length is None else length\n a = {''.join(p) for p in permutations(word, length)}\n a = list(a)\n return a", "def words_without_letter(l):\n\treturn {w for w in word_set if has_no_letter(w, l)}", "def encode_word(word: str) -> List[str]:\n inner_letters = word[1:-1]\n inner_letters = 
shuffle(inner_letters)\n return [word[0], *inner_letters, word[-1]]", "def anagrams(word): \n if len(word) < 2:\n yield word\n else:\n for i, letter in enumerate(word):\n if not letter in word[:i]: #avoid duplicating earlier words\n for j in anagrams(word[:i]+word[i+1:]):\n yield j+letter", "def ladder(word: str) -> List[str]:\n found_words = set()\n for i in range(len(word)):\n pattern = list(word)\n pattern[i] = '.'\n search_results = search(\"^\" + \"\".join(pattern) + \"$\")\n for result in search_results:\n if result != word:\n found_words.add(result)\n return found_words", "def gen_all_strings(word):\n if len(word) == 0:\n return [\"\"]\n elif len(word) == 1:\n return [\"\",word]\n else:\n result_strings = []\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n new_strings = []\n for rest_string in rest_strings:\n for dummy_index in range(len(rest_string)):\n #在首位插入\n if dummy_index == 0:\n new_string = first + rest_string\n new_strings.append(new_string)\n #在中间插入 \n else:\n new_string = rest_string[0:dummy_index] + first + rest_string[dummy_index:]\n new_strings.append(new_string)\n #在末尾插入\n new_strings.append(rest_string + first)\n \n result_strings.extend(rest_strings)\n result_strings.extend(new_strings)\n \n return result_strings", "def remove_repeated_characters(word):\n def get_real_word(word):\n if wordnet.synsets(word):\n return word\n new_word = repeats.sub(match_sub, word)\n return get_real_word(new_word) if new_word != word else new_word\n repeats = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\n match_sub = r'\\1\\2\\3'\n return [get_real_word(word) for word in word]", "def missingWords(s, t):\n missingWords = []\n \n new_t = t.split()\n new_s = s.split()\n\n for index, word in enumerate(new_s):\n if new_t[index] != word:\n missingWords.append(word)\n new_t[index] = word\n\n new_list = new_s - new_t\n\n return new_list", "def word_to_ngrams(self, word):\n encoding = list()\n n = self.n\n if word == self.eos or word == self.sos:\n encoding.append(self.ngram_to_id[word])\n else:\n _word = '^' + word + '$'\n for i in range(len(_word) - n + 1):\n ngram = _word[i:i + n]\n if ngram in self.ngram_to_id:\n encoding.append(self.ngram_to_id[ngram])\n else:\n for ch in ngram:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.ngram_to_id and flag == 1:\n encoding.append(self.ngram_to_id[ch])\n else:\n encoding.append(self.ngram_to_id['<unk>'])\n return encoding", "def full_words(word, string, sensitive=True):\n temp_word = ''\n o = []\n start = 0\n if not sensitive:\n word = word.lower()\n string = string.lower()\n for i, char in enumerate(string):\n if char != ' ':\n temp_word += char\n if i == 0:\n start = 0\n else:\n if string[i - 1] == ' ':\n start = i\n if i == len(string) - 1:\n if temp_word == word:\n o.append([start, start + len(word)])\n else:\n if temp_word == word:\n o.append([start, start + len(word)])\n temp_word = ''\n return o", "def squeeze(word):\n return ''.join(x[0] for x in groupby(word))", "def sub_words(word):\n sub_words_lst = []\n for i in range(len(word)):\n sub_word = word[:i]+word[i+1:]\n sub_words_lst.append(sub_word)\n return sub_words_lst", "def return_split_word(word):\n\n res = []\n words = find_combination_of_words(word)\n for word in words:\n if len(word[1]) > 0:\n res.append((word[0], max(word[1], key=lambda w: len(w))))\n\n return res", "def generate_alphabet_from_word(word):\n word = \" \"+word+\" \"\n chars = [char for char in word] # Getting letters from the word\n chars += map(add, 
chars[:-1], chars[1:]) # Adding bigrams to the list\n\n # Computing hash of items and add 0 to the list\n return set([0] + [anagram_hash(c) for c in set(chars)])", "def process_word(self, word: str) -> list[str]:\n d = self.d\n if not d:\n return None\n if d.check(word):\n return None\n # Speed doesn't matter here. The more we find, the more convenient.\n # Remove all digits.\n word = ''.join([i for i in word if not i.isdigit()])\n if d.check(word) or d.check(word.lower()):\n return None\n if word.find('_') > -1:\n # Snake case.\n words = word.split('_')\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n words = g.unCamel(word)\n if words:\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n return d.suggest(word)", "def process_word(self, word: str) -> list[str]:\n d = self.d\n if not d:\n return None\n if d.check(word):\n return None\n # Speed doesn't matter here. The more we find, the more convenient.\n # Remove all digits.\n word = ''.join([i for i in word if not i.isdigit()])\n if d.check(word) or d.check(word.lower()):\n return None\n if word.find('_') > -1:\n # Snake case.\n words = word.split('_')\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n words = g.unCamel(word)\n if words:\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n return d.suggest(word)" ]
[ "0.71038306", "0.6765064", "0.67550427", "0.6735594", "0.6625915", "0.6620199", "0.65177166", "0.65156925", "0.6513065", "0.6501058", "0.64591277", "0.64591277", "0.6438254", "0.6437247", "0.624116", "0.6238542", "0.6235051", "0.6182002", "0.616684", "0.61635417", "0.60932815", "0.6068186", "0.60426414", "0.6042317", "0.602978", "0.60270584", "0.60218894", "0.60070014", "0.59723496", "0.59723496" ]
0.7147192
0
r"""Get basic information about the algorithm.
def algorithmInfo(): return r"""TODO"""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def algorithmInfo():\n\t\treturn r\"\"\"Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniski vestnik, 2013. 1-7.\"\"\"", "def getInfo():", "def describe_algorithm(AlgorithmName=None):\n pass", "def info() -> None:", "def get_algo_info(self, algo=None, **kwargs):\n if algo:\n return self.mrr_obj.get('/info/algos' + '/' + algo, **kwargs)\n return self.mrr_obj.get('/info/algos')", "def test_algorithm_info_fine(self):\n\t\ti = self.algo.algorithmInfo()\n\t\tself.assertIsNotNone(i)", "def get_info(self):\n pass", "def get_info(self):\n pass", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def info(self) -> dict:", "def rpc_info():", "def info(self):", "def info(self):", "def get_info(self):\n return \"TODO !\"", "def _get_information(self):\n pass", "def get_info(self):\n raise NotImplementedError(\"Robot.get_info\")", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def get_info(self) -> 
str:\n raise NotImplementedError()", "def get_algorithm(self):\n pass", "def algorithm(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"algorithm\")", "def info() -> Dict[str, Any]:", "def info(self) -> int:", "def get_info(self):\n return None", "def get_alg(self):\r\n raise NotImplementedError", "def describe():", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def module_info():\n pass", "def _get_spec_info(self):\n raise NotImplementedError()", "def describe(self) -> str:", "def info(self):\r\n\r\n return self.sim_info" ]
[ "0.7340363", "0.7214091", "0.72085875", "0.7114501", "0.6941842", "0.69047254", "0.683423", "0.683423", "0.66760594", "0.6650416", "0.66345215", "0.6631911", "0.6631911", "0.65858006", "0.6542308", "0.6487851", "0.64559406", "0.6408391", "0.63860804", "0.63573605", "0.6355828", "0.63477474", "0.63317275", "0.62729084", "0.6254106", "0.62207556", "0.6198594", "0.6177911", "0.6167968", "0.61678773" ]
0.8159429
0
r"""Get basic information about the algorithm.
def algorithmInfo(): return r"""Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. "A Hybrid Bat Algorithm". Elektrotehniski vestnik, 2013. 1-7."""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def algorithmInfo():\n\t\treturn r\"\"\"TODO\"\"\"", "def getInfo():", "def describe_algorithm(AlgorithmName=None):\n pass", "def info() -> None:", "def get_algo_info(self, algo=None, **kwargs):\n if algo:\n return self.mrr_obj.get('/info/algos' + '/' + algo, **kwargs)\n return self.mrr_obj.get('/info/algos')", "def test_algorithm_info_fine(self):\n\t\ti = self.algo.algorithmInfo()\n\t\tself.assertIsNotNone(i)", "def get_info(self):\n pass", "def get_info(self):\n pass", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def info(self) -> dict:", "def rpc_info():", "def info(self):", "def info(self):", "def get_info(self):\n return \"TODO !\"", "def _get_information(self):\n pass", "def get_info(self):\n raise NotImplementedError(\"Robot.get_info\")", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def get_info(self) -> str:\n raise NotImplementedError()", "def get_algorithm(self):\n pass", "def algorithm(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"algorithm\")", "def info() -> Dict[str, Any]:", "def info(self) -> int:", "def get_info(self):\n return None", "def get_alg(self):\r\n raise NotImplementedError", "def describe():", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def module_info():\n pass", "def _get_spec_info(self):\n raise NotImplementedError()", "def describe(self) -> str:", "def info(self):\r\n\r\n return self.sim_info" ]
[ "0.8159429", "0.7214091", "0.72085875", "0.7114501", "0.6941842", "0.69047254", "0.683423", "0.683423", "0.66760594", "0.6650416", "0.66345215", "0.6631911", "0.6631911", "0.65858006", "0.6542308", "0.6487851", "0.64559406", "0.6408391", "0.63860804", "0.63573605", "0.6355828", "0.63477474", "0.63317275", "0.62729084", "0.6254106", "0.62207556", "0.6198594", "0.6177911", "0.6167968", "0.61678773" ]
0.7340363
1
r"""Get parameters of the algorithm.
def getParameters(self): d = AdaptiveBatAlgorithm.getParameters(self) d.update({ 'A_l': self.A_l, 'A_u': self.A_u, 'r_l': self.r_l, 'r_u': self.r_u, 'tao_1': self.tao_1, 'tao_2': self.tao_2 }) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_params(self):", "def get_params(self):\n raise NotImplementedError", "def get_params(self):\n pass", "def get_params(self) -> np.array:\n pass", "def _get_params(self):\r\n return self.k._get_params()", "def parameters(self):\n return self.pars", "def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'M': d.pop('population_size', self.population_size),\n 'num_tests': self.num_tests,\n 'num_searches': self.num_searches,\n 'num_searches_best': self.num_searches_best,\n 'bonus1': self.bonus1,\n 'bonus2': self.bonus2,\n 'num_enabled': self.num_enabled,\n 'local_searches': self.local_searches\n })\n return d", "def getParams(self):\n return self.trainError, self.trainAcc, self.w", "def getParams(self):\n\n\t\tparams = {\"Nparticles\":self.__Nparticles,\"Nkicks\":self.__Nkicks,\"kappa\":self.__kappa, \"eta\":self.__eta,\"gamma\":self.__gamma, \"omega\":self.__omega,\n\t\t\"Kbt\":self.__Kbt, \"tk\":self.__tk}\n\n\t\treturn params", "def get_params(self):\n return self.w, self.b", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def parameters(self):", "def getParams(self):\n return self.W, self.b", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def parameters(self):\n return self._params", "def parameters(self):\n pass", "def _get_params_for_run(self):\n if self._optimizer is not None:\n return self._optimizer.get_next_params()\n else:\n return self._params", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def params(self):\n return self._pars", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_hyperparams(self):", "def get_params(self):\n return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}", "def getParams(self):\n return self.__params", "def get_params(self):\n return self.params" ]
[ "0.7806842", "0.7683025", "0.7607203", "0.7535243", "0.75247824", "0.7506232", "0.7460225", "0.7407546", "0.7397588", "0.7395876", "0.7346837", "0.7346205", "0.7313976", "0.73048675", "0.7240246", "0.72340614", "0.7218383", "0.7201478", "0.7182246", "0.7177862", "0.71744525", "0.7151034", "0.71253383", "0.7118514", "0.7118514", "0.7118514", "0.7108019", "0.71069396", "0.7070764", "0.7051677" ]
0.7740987
1
downloads entire database and saves to .h5, replacing old file
def download_entire_db(storage_path=DEFAULT_STORAGE, remove_previous=True, return_df=False, return_latest_date=False, write=['feather']): # first check if we have the latest data if not os.path.exists(storage_path): splitpath = storage_path.split('/')[1:] # first entry is blank due to home dir / for i, p in enumerate(splitpath, 1): path = '/'.join(splitpath[:i]) if not os.path.exists(path): os.mkdir(path) zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/data?api_key=' + Q_KEY s = req.Session() s.mount('https', HTTPAdapter(max_retries=10)) r = s.get(zip_file_url) # another possible way to deal with retries # while True: # try: # r = req.get(zip_file_url, timeout=10) # break # except Exception as e: # print(e) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(path=storage_path) df = pd.read_csv(storage_path + \ z.filelist[0].filename, names=HEADERS, index_col=1, parse_dates=True, infer_datetime_format=True) latest_date = df.index.max().date().strftime('%Y%m%d') if 'hdf5' in write: df.to_hdf(storage_path + 'EOD_' + latest_date + '.h5', key='data', complib='blosc', complevel=9) # also write feather file so can read into R # have to reset the index because feather can't handle non-default index (maybe non-unique?) df.reset_index(inplace=True) if 'feather' in write: df.to_feather(storage_path + 'EOD_' + latest_date + '.ft') if remove_previous: for ext in ['h5', 'ft']: files = glob.glob(storage_path + 'EOD_*.' + ext) files = [f for f in files if len(f.split('/')[-1]) == 15] # don't want any of the small files, only full DBs print(sorted(files, key=os.path.getctime)) if len(files) > 1: previous_file = sorted(files, key=os.path.getctime)[-2] print('removing', previous_file) os.remove(previous_file) # delete downloaded zip file os.remove(storage_path + z.filelist[0].filename) if return_df: # set index back to normal for return_df df.set_index('Date', inplace=True) return df elif return_latest_date: return pd.to_datetime(df['Date'].max().date())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download_data_and_save():\n url = 'https://github.com/djay/covidthailand/wiki/combined.csv'\n s=requests.get(url).content\n global df\n global last_updated\n df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n df.to_parquet(file_name, compression='UNCOMPRESSED')\n df.to_csv('jaydata.csv')\n last_updated = df['Date'][df.index[-1]].strftime(\"%d %B %Y\")\n\n url = 'https://raw.githubusercontent.com/wiki/djay/covidthailand/vaccinations.csv'\n s=requests.get(url).content\n global vac_df\n vac_df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n vac_df.to_parquet('vaccination.parquet', compression='UNCOMPRESSED')\n\n print(\"Data downloaded and saved successfully. Data up to \" + last_updated)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)" ]
[ "0.63205135", "0.62534106", "0.62534106", "0.62534106", "0.62534106", "0.62534106", "0.62534106", "0.6183906", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901", "0.6038901" ]
0.6550079
0
Checks to see if market is open today. Uses the pandas_market_calendars package as mcal
def check_market_status(): today_ny = datetime.datetime.now(pytz.timezone('America/New_York')) ndq = mcal.get_calendar('NASDAQ') open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny) if today_ny.date() in open_days.index: return open_days else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_market_status():\n # today = datetime.datetime.now(pytz.timezone('America/New_York')).date()\n today_utc = pd.to_datetime('now').date()\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc)\n if today_utc in open_days.index:\n return open_days\n else:\n return None", "def marketOpen():\n tz = conf['GLOBAL']['timezone']\n today = datetime.today().astimezone(pytz.timezone(tz))\n today_fmt = today.strftime('%Y-%m-%d')\n tdHoursURL = conf['TD']['hoursURL']\n key = conf['TD']['key']\n params = {\n 'apikey': key,\n 'date': today_fmt\n }\n\n request = requests.get(\n url=tdHoursURL,\n params=params\n ).json()\n \n \n if request['equity']['EQ']['isOpen'] is True:\n return(True)\n else:\n return(False)", "def isMarketOpen(self):\n if not self.normalDay:\n return False\n now = datetime.now()\n if now.hour >= 9 and now.hour < 16:\n if now.hour == 9 and now.minute < 30:\n return False\n return True\n return False", "def check_if_open(bursa: pd.DataFrame, exchange: str) -> bool:\n exchange = exchange.upper()\n if exchange in bursa.index.values:\n tz = bursa.loc[exchange][\"timezone\"]\n exchange_df = bursa.loc[exchange]\n elif exchange in bursa[\"short_name\"].values:\n tz = bursa.loc[bursa[\"short_name\"] == exchange][\"timezone\"].values[0]\n exchange_df = bursa.loc[bursa[\"short_name\"] == exchange]\n exchange_df = exchange_df.iloc[0].transpose()\n utcmoment_naive = datetime.utcnow()\n utcmoment = utcmoment_naive.replace(tzinfo=pytz.utc)\n local_datetime = utcmoment.astimezone(pytz.timezone(tz))\n market_open = datetime.strptime(exchange_df[\"market_open\"], \"%H:%M:%S\")\n market_close = datetime.strptime(exchange_df[\"market_close\"], \"%H:%M:%S\")\n after_market_open = local_datetime.time() >= market_open.time()\n before_market_close = local_datetime.time() <= market_close.time()\n try:\n lunchbreak_start = datetime.strptime(\n exchange_df[\"lunchbreak_start\"], \"%H:%M:%S\"\n )\n lunchbreak_end = datetime.strptime(exchange_df[\"lunchbreak_end\"], \"%H:%M:%S\")\n\n after_lunch_start = local_datetime.time() >= lunchbreak_start.time()\n before_lunch_end = local_datetime.time() <= lunchbreak_end.time()\n except Exception:\n after_lunch_start = False\n before_lunch_end = False\n\n if local_datetime.weekday() >= 5:\n result = False\n else:\n result = (\n after_market_open\n and before_market_close\n and not (after_lunch_start and before_lunch_end)\n )\n\n return result", "def checkToday(ctx, cron):\n daze = ctx.obj['daze']\n if cron is not None:\n if date.today() in daze.dateDict.keys():\n sys.exit(1)\n else:\n sys.exit(0)\n click.echo(date.today() in daze.dateDict.keys())\n return date.today() in daze.dateDict.keys()", "def is_opening(self):\n now = timezone.now()\n return self.start_date.date() >= now.date()", "def office_is_open_on_date(iso_date):\n d_time = datetime.fromisoformat(iso_date)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n return schedule != {}", "def today(self) -> bool:\n return self._algorithm.can_study_now(self._stat)", "def updateToday(tradingDay):\n if date.today() != tradingDay.today:\n tradingDay = TradingDay(tradingDay.contractDetails)\n\n if tradingDay.isMarketOpen():\n if not tradingDay.marketOpen:\n tradingDay.marketOpen = True\n console().info(\"The Market Has Opened\")\n else:\n if tradingDay.marketOpen:\n tradingDay.marketOpen = False\n console().info(\"The Market Has Closed\")\n return 
tradingDay", "def test_date_accept_today(self):\n spi_search = \"find date today\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def is_today(self, dt: datetime.datetime) -> bool:\n\n if self is Day.DAILY:\n return True\n day = dt.weekday()\n if self is Day.WEEKDAY:\n return day < 5\n if self is Day.WEEKEND:\n return day >= 5\n return Day(day) == self", "def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()", "def isNormalTradingDay(self):\n days = self.contractDetails.tradingHours.split(\";\")\n dateString = self.today.strftime(\"%Y%m%d\")\n today = [x for x in days if x.split(\":\")[0] == dateString]\n if not today:\n console().error(\"Missing Contract Market Hours for Today.\")\n hours = today[0].split(\":\")[1]\n if hours == \"CLOSED\" or hours != config.NORMAL_TRADING_HOURS:\n return False\n return True", "def test_busy_day_output_correct(price_data):\n volume_df = price_data\n res = c.calculate_busy_day(price_data)\n\n volume_df = volume_df.loc[(volume_df['ticker'] == 'MSFT')]\n avg_volume = volume_df['volume'].sum() / len(volume_df)\n volume_df['high_volume'] = volume_df['volume'] > 1.1 * avg_volume\n busy_days = volume_df.loc[volume_df['high_volume'] == True]\n\n assert (sorted(res.loc[res['ticker'] == 'MSFT'].date.unique() ==\n busy_days.date.unique()))", "def is_no_bell_day():\n today = time.localtime()[:3]\n for r in no_bell:\n if today >= r[0] and today <= r[1]:\n return True\n return False", "def check(self):\n validity_year = int(self.date[0:4])\n validity_month = int(self.date[5:7])\n validity_day = int(self.date[8:10])\n if datetime.today().year > validity_year:\n self.flag = False\n elif datetime.today().year == validity_year:\n if datetime.today().month > validity_month:\n self.flag = False\n elif datetime.today().month == validity_month:\n if datetime.today().day > validity_day:\n self.flag = False\n else:\n self.flag = True\n else:\n self.flag = True\n else:\n self.flag = True", "def availability_exists_today(self):\n\n availability_exists = False\n\n for avail in self.availabilities:\n\n # The availability is free, so one slot exists on this day\n if avail.is_free():\n availability_exists = True\n\n break\n\n return availability_exists", "def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time", "def isCurrentDay(self):\n t = time()\n gmt = safegmtime(t + _tzoffset(self._tz, t))\n return (gmt[0] == self._year and gmt[1] == self._month and\n gmt[2] == self._day)", "def contact_now(date: str) -> bool:\n\n time_date = string_to_datetime(date)\n return date_is_today(time_date) or date_is_in_past(time_date)", "def is_open_for_betting(self):\n return self.is_open", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 
or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def chky(uid):\n\tx = db.checkins_today(uid)\n\tif x == None: return False\n\telse: return True", "def check_OOS_last_day(df: pandas.core.frame.DataFrame, date: datetime):\n last_day = df['Date'].max()\n if date == last_day:\n return 1\n else:\n return 0", "def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False", "def close_position(self, symbol):\n #Need to add a check for intraday_quantity, if not 0 then was bought today\n #Need to check if there are any pending orders for any options before determining quantity\n df = self.get_option_positions(symbol=symbol)\n if df.shape[0] == 0:\n return True\n exp = sorted({exp: len(list(df[df['expiration_date'] == exp]['type'].unique())) for exp in list(df['expiration_date'].unique())}.items(),key=lambda x: (x[1],x[0]))[0]\n df = df[df['expiration_date']==exp[0]].sort_values(by=['quantity','type','mark_price'], ascending=[False,False,True]) if exp[1] > 1 else df\n option = list(df['option'].unique())[0] if exp[1] > 1 else list(df[df['expiration_date']==exp[0]]['option'].unique())[0]\n self.close_option(symbol, option[:-1].split('/')[-1], 'sell' if exp[1] == 1 or get_long_short_difference(df) > 0 else 'buy', 'close', max_quantity=get_max_quantity(df, option, exp[1]))\n self.close_position(symbol)", "def will_occur(self, now):\n return self.end_repeat is None or self.end_repeat >= now.date() or \\\n self.l_start_date >= now or self.l_end_date >= now", "def checkOpenStatus(self):\n xl = win32.gencache.EnsureDispatch('Excel.Application')\n update_file = \"BoardData_update.csv\"\n update_file_pth = os.path.join(self.current_dir, 'src', update_file)\n\n if xl.Workbooks.Count > 0:\n print(\"opened: \", xl.Workbooks.Count)\n\n # if none of opened workbooks matches the name, openes my_workbook\n if any(i.Name == update_file for i in xl.Workbooks):\n print(\"It is opended\")\n xl.Workbooks.Open(Filename=update_file_pth).Close(True)\n\n else:\n print(\"It is not opended\")", "def test_is_payday_positive0(self):\n date_to_check = date_class(2018,1,12)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2018,2,23)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2018,11,16)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2018,12,28)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True" ]
[ "0.7844019", "0.7299794", "0.71417683", "0.64564335", "0.62261474", "0.6127471", "0.6050444", "0.596668", "0.57297295", "0.5713947", "0.5685722", "0.5678899", "0.5610287", "0.55595165", "0.54880613", "0.5486532", "0.5417402", "0.540892", "0.53961575", "0.53872633", "0.5376947", "0.5373459", "0.5333964", "0.53271675", "0.5307858", "0.5302999", "0.5299542", "0.5288933", "0.52879757", "0.5278832" ]
0.7893472
0
checks if it is a trading day today, and downloads entire db after it has been updated (930pm ET) need to refactor this is messy and touchy. Have to start before midnight UTC to work ideally
def daily_download_entire_db(storage_path=DEFAULT_STORAGE): latest_db_date = get_latest_db_date() while True: latest_close_date = get_latest_close_date() if latest_db_date is None: print('no database file exists, downloading...') latest_db_date = download_entire_db(return_latest_date=True) continue today_utc = pd.to_datetime('now') today_ny = datetime.datetime.now(pytz.timezone('America/New_York')) pd_today_ny = pd.to_datetime(today_ny.date()) if latest_db_date.date() != latest_close_date.date(): if (latest_close_date.date() - latest_db_date.date()) >= pd.Timedelta('1D'): if today_ny.hour > latest_close_date.hour: print('db more than 1 day out of date, downloading...') latest_db_date = download_entire_db(return_latest_date=True) elif pd_today_ny.date() == latest_close_date.date(): # if the market is open and the db isn't up to date with today... if today_ny.hour >= 22: print('downloading db with update from today...') latest_db_date = download_entire_db(return_latest_date=True) print('sleeping 1h...') time.sleep(3600) # old code...don't think I need this anymore # open_days = check_market_status() # if open_days is not None: # close_date = open_days.loc[today_utc.date()]['market_close'] # # TODO: add check if after closing time # if today_utc.dayofyear > close_date.dayofyear or today_utc.year > close_date.year: # if today_ny.hour > 10: # need to wait until it has been processed to download # last_scrape = today_ny.date() # print('downloading db...') # download_entire_db() # else: # # need to make it wait number of hours until close # print('waiting for market to close, waiting 1 hour...') # time.sleep(3600) # else: # # need to wait till market will be open then closed next # print('market closed today, waiting 1 hour...') # time.sleep(3600) # wait 1 hour # else: # # need to make this more intelligent so it waits until the next day # print('already scraped today, waiting 1 hour to check again...') # time.sleep(3600)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()", "def updateToday(tradingDay):\n if date.today() != tradingDay.today:\n tradingDay = TradingDay(tradingDay.contractDetails)\n\n if tradingDay.isMarketOpen():\n if not tradingDay.marketOpen:\n tradingDay.marketOpen = True\n console().info(\"The Market Has Opened\")\n else:\n if tradingDay.marketOpen:\n tradingDay.marketOpen = False\n console().info(\"The Market Has Closed\")\n return tradingDay", "def isNormalTradingDay(self):\n days = self.contractDetails.tradingHours.split(\";\")\n dateString = self.today.strftime(\"%Y%m%d\")\n today = [x for x in days if x.split(\":\")[0] == dateString]\n if not today:\n console().error(\"Missing Contract Market Hours for Today.\")\n hours = today[0].split(\":\")[1]\n if hours == \"CLOSED\" or hours != config.NORMAL_TRADING_HOURS:\n return False\n return True", "def checkToday(ctx, cron):\n daze = ctx.obj['daze']\n if cron is not None:\n if date.today() in daze.dateDict.keys():\n sys.exit(1)\n else:\n sys.exit(0)\n click.echo(date.today() in daze.dateDict.keys())\n return date.today() in daze.dateDict.keys()", "def isMarketOpen(self):\n if not self.normalDay:\n return False\n now = datetime.now()\n if now.hour >= 9 and now.hour < 16:\n if now.hour == 9 and now.minute < 30:\n return False\n return True\n return False", "async def daily(self, ctx: commands.Context):\n self.check_if_exist(ctx.guild)\n\n if ctx.invoked_subcommand == None:\n await ctx.reply(\"Options: `channel`, `timezone`, `ping`\")", "def check_market_status():\n # today = datetime.datetime.now(pytz.timezone('America/New_York')).date()\n today_utc = pd.to_datetime('now').date()\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc)\n if today_utc in open_days.index:\n return open_days\n else:\n return None", "def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False", "def check_market_status():\n today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)\n if today_ny.date() in open_days.index:\n return open_days\n else:\n return None", "def logDayDetails(self):\n console().info(\"Today is {}.\".format(self.today.strftime(DATE_FMT)))\n hours = self.contractDetails.tradingHours.split(\";\")[0].split(\":\")[1]\n console().info(\"Today's Trading Hours Are: {}\".format(hours))\n if self.normalDay:\n console().info(\"Today is a Valid Day for Trading\")\n else:\n console().info(\"Today is not a Valid Trading Day. 
Sleeping Until Tomorrow\")", "def test_date_accept_today(self):\n spi_search = \"find date today\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def download_stocks(stocklist=STOCKLIST, fresh=False):\n # load stocklist\n with open(stocklist) as f:\n stocks = f.read().strip('\\n').split('\\n')\n\n dfs = {}\n for s in stocks:\n print(s)\n stockfile = '../stockdata/' + s + '.csv.gz'\n if fresh or not os.path.exists(stockfile):\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n continue\n\n else:\n stock = pd.read_csv(stockfile, index_col=0)\n stock.index = pd.to_datetime(stock.index)\n timedelta_step = 1\n if HOUR > 2 and WEEKDAY not in [5, 6]: # for mtn time\n timedelta_step = 0\n elif WEEKDAY == 0: # it's monday\n timedelta_step = 3 # can be up to last friday\n elif WEEKDAY in [5, 6]: # if a weekend, last data is from friday\n timedelta_step = WEEKDAY - 4\n print('date gap:', TODAY.date() - stock.iloc[-2:].index[-1].date())\n print('step, timedelta:', timedelta_step, datetime.timedelta(timedelta_step))\n if (TODAY.date() - stock.iloc[-2:].index[-1].date()) <= datetime.timedelta(timedelta_step):\n dfs[s] = stock\n print('latest date close enough to up-to-date:')\n print(stock.iloc[-2:].index[-1].date())\n print('not downloading')\n print('')\n continue\n else:\n print('latest date is')\n print(stock.iloc[-2:].index[-1].date())\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n\n return dfs", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n 
tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def is_today(self, dt: datetime.datetime) -> bool:\n\n if self is Day.DAILY:\n return True\n day = dt.weekday()\n if self is Day.WEEKDAY:\n return day < 5\n if self is Day.WEEKEND:\n return day >= 5\n return Day(day) == self", "def get_today():\n global endDate\n check_today=date_trans_x(time.strftime(\"%Y.%m.%d\"))\n if(get_article_ammount(check_today,True)>0): \n diff=datetime.strptime(check_today,'%Y.%m.%d')-datetime.strptime(endDate,'%Y.%m.%d')\n if(diff.total_seconds()>=0):\n endDate=check_today\n print \"get_today : \"+ check_today\n return check_today\n else:\n print \"get_today : \"+ endDate\n return endDate", "def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time", "def _check_day_data(self, datetime):\n if self.curr_day_data is None or self.compare_dates(self.curr_day_data.index[0], datetime) is False:\n date = dt.datetime(year=datetime.year, month=datetime.month, day=datetime.day)\n symbols = [product.symbol for product 
in self.products]\n self.curr_day_data = get_data_multi(symbols, date, second_bars=self.second_bars)\n self.clear_resting_orders()", "def today():\n return datetime.today()", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. \r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def check_day_advance(self):\n days_ago = datetime.now().toordinal() - self.start_time.toordinal()\n if days_ago:\n # New day. Save data for the old day.\n self.save(days_ago = days_ago)\n self.start_time = datetime.now()\n # Reset all counters back to 0:00:00.\n for rd in self.row_detail_list:\n rd.time = '0:00:00'\n self.refresh_display()", "def __download(self, since = workingday(1900,1,1)):\n\t\tuntil = workingday.today()\n\n\t\tinput_tuple = (self.symbol,\n\t\t\tstr(since.month - 1), str(since.day), str(since.year),\n\t\t\tstr(until.month - 1), str(until.day), str(until.year))\n\n\t\tself.price = dict()\n\t\tself.dividend = dict()\n\t\tself.split = dict()\n\n\t\ttry:\n\t\t\turl = 'http://ichart.yahoo.com/table.csv?s=%s&g=d&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s&ignore=.csv' % input_tuple\n\t\t\traw_data = urlopen(url)\n\t\t\traw_data.readline()\n\n\t\t\tfor line in raw_data:\n\t\t\t\tl = line.split(',')\n\t\t\t\td = workingday.strptime(l[0],'%Y-%m-%d')\n\t\t\t\trow = [\n\t\t\t\t\tfloat(l[1]), # Open\n\t\t\t\t\tfloat(l[2]), # High\n\t\t\t\t\tfloat(l[3]), # Low\n\t\t\t\t\tfloat(l[4]), # Close\n\t\t\t\t\tfloat(l[-1][:-1]), # Adj\n\t\t\t\t\tint(l[5])] # Volume\n\t\t\t\tself.price[d] = row\n\n\t\t\t# get dividend and split data\n\t\t\turl\t= 'http://ichart.finance.yahoo.com/x?s=%s&g=v&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s&ignore=.csv' % input_tuple\n\t\t\traw_data = urlopen(url)\n\t\t\traw_data.readline()\n\n\t\t\tfor line in raw_data:\n\t\t\t\tl = line.split(',')\n\t\t\t\tif l[0] == 'DIVIDEND':\n\t\t\t\t\td = workingday(int(l[1][1:5]), int(l[1][5:7]), int(l[1][7:9]))\n\t\t\t\t\tself.dividend[d] = float(l[2][:-1])\n\t\t\t\telif l[0] == 'SPLIT':\n\t\t\t\t\td = workingday(int(l[1][1:5]), int(l[1][5:7]), int(l[1][7:9]))\n\t\t\t\t\tself.split[d] = tuple(map(int, l[2][:-1].split(':')))\n\n\t\texcept:\n\t\t\tprint 'Error downloading ' + self.symbol", "def get_today():\n return datetime.today()", "def today(self):\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._update_time()\n return self._today", "def is_outdated(self):\n today = datetime.datetime.today()\n day = datetime.datetime.combine(self.date, self.start_time)\n return day <= today", "def qToday():\n \n 
return _qDate.todaysDate().ISO()", "def isOnSaleSoon(ticket):\n return ticket.start_time > timezone.now()", "def TODAY():\n return datetime.date.today()", "def brasilia_day():\n return (dt.datetime.utcnow() + dt.timedelta(hours=-3)).replace(hour=0, minute=0, second=0, microsecond=0)", "def get_latest_trading_date(date, url, service_key):\n holidays = get_holidays(date.year, url, service_key)\n holidays.append(datetime.datetime(year=date.year, month=12, day=31))\n holidays = tuple(holidays)\n while date.weekday() in (5, 6) or date in holidays:\n # 0:MON, 1:TUE, 2:WED, 3:THU, 4:FRI, 5:SAT, 6:SUN\n date = date - datetime.timedelta(days=1)\n return date" ]
[ "0.6510461", "0.635234", "0.6323189", "0.62475073", "0.6163681", "0.61534476", "0.60295653", "0.5952129", "0.5790337", "0.5765544", "0.57649076", "0.56893873", "0.56848735", "0.56789535", "0.5656514", "0.5644254", "0.5641715", "0.554061", "0.5536549", "0.5522466", "0.551798", "0.54995865", "0.54813784", "0.54338574", "0.5428324", "0.5418975", "0.5412819", "0.54046494", "0.5381602", "0.53649545" ]
0.6581072
0
gets the latest date the markets were open (NASDAQ) and returns the closing datetime; if last_close is True, returns the last datetime at which the market has closed (not in the future)
def get_latest_close_date(market='NASDAQ', return_time=False, last_close=False): # today = datetime.datetime.now(pytz.timezone('America/New_York')).date() # today_utc = pd.to_datetime('now').date() today_ny = datetime.datetime.now(pytz.timezone('America/New_York')) ndq = mcal.get_calendar(market) open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny) if last_close: past = open_days[open_days['market_close'] <= pd.to_datetime('now').tz_localize('UTC')] return past.iloc[-1]['market_close'] return open_days.iloc[-1]['market_close']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_close(self, symbol):\n if symbol in self.symbol:\n close_price = self.symbol[symbol][\"close\"]\n return close_price\n else:\n print(\n \"Close price for ticker %s is not \"\n \"available from the YahooDailyBarPriceHandler.\"\n )\n return None", "def latest_close(df):\n return df['close'].iloc[-1]", "def last_close(self):\n return self.data.last('1D').close.iat[0]", "def next_close(self):\n return self.market.next_close(\n getattr(self, 'when',\n pd.Timestamp(datetime.now(tz=pytz.UTC)))\n )", "def get_latest_bar_close(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data\")\n raise\n else:\n return getattr(bars_list[-1][1], 'Close')", "def close_position(self, symbol):\n #Need to add a check for intraday_quantity, if not 0 then was bought today\n #Need to check if there are any pending orders for any options before determining quantity\n df = self.get_option_positions(symbol=symbol)\n if df.shape[0] == 0:\n return True\n exp = sorted({exp: len(list(df[df['expiration_date'] == exp]['type'].unique())) for exp in list(df['expiration_date'].unique())}.items(),key=lambda x: (x[1],x[0]))[0]\n df = df[df['expiration_date']==exp[0]].sort_values(by=['quantity','type','mark_price'], ascending=[False,False,True]) if exp[1] > 1 else df\n option = list(df['option'].unique())[0] if exp[1] > 1 else list(df[df['expiration_date']==exp[0]]['option'].unique())[0]\n self.close_option(symbol, option[:-1].split('/')[-1], 'sell' if exp[1] == 1 or get_long_short_difference(df) > 0 else 'buy', 'close', max_quantity=get_max_quantity(df, option, exp[1]))\n self.close_position(symbol)", "def get_last_price(self, stock_object, time_zone=None):\n time_zone = TraderBase.get_timezone()\n if self.client:\n return self.client.get_last_price(stock_object)\n # get last stock price by database\n price = self.db_tool.session.query(Series)\\\n .join(Stock)\\\n .filter(Stock.id == stock_object.id)\\\n .filter(Series.date <= datetime.datetime.now(time_zone))\\\n .order_by(-Series.date).first()\n\n if not price:\n return None\n return price.priceclose", "def get_max_close(symbol):\n df = pd.read_csv(\"data/{}.csv\".format(symbol)) # read in data\n return df['Close'].max() # compute and return max", "def latest_price(self, symbol: str, state: Series, is_backtest: bool, crypto: bool, side: str = 'buy') -> float:\n if is_backtest:\n if crypto:\n if side == 'buy': return state['close']\n else: return state['close']\n else: return state['close']\n else:\n if crypto:\n try: ask, bid = self.cb_client.latest_symbol_price(symbol)\n except Exception as e:\n print(e)\n raise Exception(f'Latest crypto buy-side tick data for {symbol} not available: ', e)\n\n print(f'latest_price:spread: {ask-bid:.5f} ({(ask-bid)/ask*100:.3f})')\n if side == 'buy': return ask\n else: return bid\n else:\n if side == 'buy':\n if symbol in self.ib_client.latest:\n try: return float(self.ib_client.latest[symbol].ask)\n except: return self.ib_client.latest[symbol].ask\n else:\n print(f'{symbol} buy-side not in {self.ib_client.latest}')\n raise 'Latest market buy-side tick data for {} not available'.format(symbol)\n else:\n if symbol in self.ib_client.latest:\n try: return float(self.ib_client.latest[symbol].bid)\n except: return self.ib_client.latest[symbol].bid\n else:\n print(f'{symbol} sell-side not in {self.ib_client.latest}')\n raise 'Latest market sell-side tick data for {} not available'.format(symbol)", "def getClosingPrice(self):\t\n\t\treturn 
self.dataPoints[-1].getDate(), self.dataPoints[-1].getAdjustedValue()", "def get_latest_bar_datetime(self, symbol):\n raise NotImplementedError(\"Should implement get_latest_bar_datetime()\")", "def marketOpen():\n tz = conf['GLOBAL']['timezone']\n today = datetime.today().astimezone(pytz.timezone(tz))\n today_fmt = today.strftime('%Y-%m-%d')\n tdHoursURL = conf['TD']['hoursURL']\n key = conf['TD']['key']\n params = {\n 'apikey': key,\n 'date': today_fmt\n }\n\n request = requests.get(\n url=tdHoursURL,\n params=params\n ).json()\n \n \n if request['equity']['EQ']['isOpen'] is True:\n return(True)\n else:\n return(False)", "def get_latest_bar_datetime(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data\")\n raise\n else:\n return getattr(bars_list[-1][1], 'Time')", "def check_market_status():\n today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)\n if today_ny.date() in open_days.index:\n return open_days\n else:\n return None", "def check_market_status():\n # today = datetime.datetime.now(pytz.timezone('America/New_York')).date()\n today_utc = pd.to_datetime('now').date()\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc)\n if today_utc in open_days.index:\n return open_days\n else:\n return None", "def updateLastPrice(self):\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(\n pytz.timezone('US/Central')).strftime(\"%H:%M\")\n\n # UPDATE POSITION LAST PRICE AND UPDATE HIGH PRICE\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions_list = []\n\n for position in open_positions:\n\n symbol = position[\"Symbol\"]\n\n if symbol not in open_positions_list:\n\n open_positions_list.append(symbol)\n\n if len(open_positions_list) > 0:\n\n resp = self.tdameritrade.getQuotes(open_positions_list)\n\n if resp:\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n if dt_central == \"15:00\":\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Opening_Price\": last_price}})\n\n # UPDATE QUEUE LAST PRICE\n queues = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type})\n\n queues_list = []\n\n for queue in queues:\n\n if self.asset_type == \"EQUITY\":\n\n symbol = queue[\"Symbol\"]\n\n elif self.asset_type == \"OPTION\":\n\n symbol = queue[\"Pre_Symbol\"]\n\n if symbol not in queues_list:\n\n queues_list.append(symbol)\n\n if len(queues_list) > 0:\n\n resp = self.tdameritrade.getQuotes(queues_list)\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n if self.asset_type == \"EQUITY\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n elif self.asset_type == 
\"OPTION\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Pre_Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})", "def getFullPriceHistory(self, stockSymbol, stockExchange):\n response = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}:{}&outputsize=full&apikey={}\".format(\n stockExchange, stockSymbol, self.ALPHA_VANTAGE_SECRET_KEY))\n data = response.json()\n timestamps, aClose = [], []\n for key in data['Time Series (Daily)']:\n timestamps.append(key)\n dates = [datetime.strptime(\n ts, \"%Y-%m-%d\") for ts in timestamps]\n dates.sort()\n dates.reverse()\n Dates = [datetime.strftime(ts, \"%Y-%m-%d\") for ts in dates]\n for date in Dates:\n aClose.append(\n float(data['Time Series (Daily)'][date]['5. adjusted close']))\n return (Dates, aClose)", "def current_close(self):\n open = self._prices.open[self._offset]\n relativ_close = self._prices.close[self._offset]\n return open * (1.0 + relativ_close)", "def get_benchmark_returns(symbol, first_date, last_date):\n if symbol == '^GSPC':\n symbol = 'spy'\n\n data = pd_reader.DataReader(\n symbol,\n 'google',\n first_date,\n last_date\n )\n\n data = data['Close']\n\n data[pd.Timestamp('2008-12-15')] = np.nan\n data[pd.Timestamp('2009-08-11')] = np.nan\n data[pd.Timestamp('2012-02-02')] = np.nan\n\n data = data.fillna(method='ffill')\n\n return data.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]", "def db_last_update_date(ticker='AMZN', db_name='test_stock_raw', collection_name='amzn_raw', query_end_date=datetime.now(), db_first_date=datetime(2015,1,1)):\n stock_db = db_client[db_name]\n stock_collection = stock_db[collection_name]\n\n if collection_name in stock_db.list_collection_names():\n\n date_30_days_ago = query_end_date-timedelta(days=30)\n # query stock data for the past 30 days from query_end_date\n query_result = stock_collection.find({'Stock':ticker,\n 'Datetime': {'$gte': date_30_days_ago, '$lte': query_end_date}})\n\n if query_result.count() > 0:\n print(f'query_result.count() = {query_result.count()} for the past 30 days from {query_end_date}')\n \n else:\n print('query_result.count() = 0 for the past 30 days')\n query_result = stock_collection.find({ 'Stock':ticker,\n 'Datetime': {'$gte': datetime(2015,1,1), '$lte': query_end_date}})\n\n result_date_list = []\n for x in list(query_result):\n result_date_list.append(x['Datetime']) \n # print(f'result_date_list from the query = {result_date_list}')\n\n if len(result_date_list) == 0:\n print(f'result_date_list is empty!!!')\n \n else:\n collection_last_date = max(result_date_list)\n print(f'mongodb collection_last_date = {collection_last_date}')\n \n \n else:\n print(\"Creating a new collection since it doesn't exist.......\")\n print(\"Stock data between 2015-01-01 and today will be uploaded by default, unless selected otherwise.\")\n collection_last_date = db_first_date\n \n return collection_last_date", "def get_last_update(self):\n return self.ticker.all().order_by('-created').first()", "def get_closed_order():\n try:\n result = EXCHANGE.fetch_closed_orders(CONF.pair, since=None, limit=3, params={'reverse': True})\n if result is not None and len(result) > 0:\n orders = sorted(result, key=lambda order: order['datetime'])\n last_order = Order(orders[-1])\n LOG.info('Last %s', str(last_order))\n return last_order\n return None\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, 
type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_closed_order()", "def get_last_price_tmp(market):\n\trequest = api.get_ticker(market)\n\tif not request['message']:\n\t\tlast = str(request['result']['Last'])\n\t\treturn (last)\n\telse:\n\t\tprint(request['message'])\n\t\tsys.exit(0)", "def get_last_available_price(self, tickers: Union[Ticker, Sequence[Ticker]],\n frequency: Frequency = None) -> Union[float, QFSeries]:\n frequency = frequency or self.fixed_data_provider_frequency or Frequency.MIN_1\n\n if frequency <= Frequency.DAILY:\n raise ValueError(\"The Intraday Data Handler can be used only with the Intraday Frequency\")\n\n tickers, was_single_ticker_provided = convert_to_list(tickers, Ticker)\n\n # if an empty tickers list was supplied then return an empty result\n if not tickers:\n return QFSeries()\n\n current_datetime = self.timer.now()\n\n # If the current_datetime represents the time after Market Close and before Market Open, shift it to the\n # Market Close of the day before\n if current_datetime + MarketOpenEvent.trigger_time() > current_datetime:\n current_datetime = current_datetime - RelativeDelta(days=1)\n current_datetime = current_datetime + MarketCloseEvent.trigger_time()\n elif current_datetime + MarketCloseEvent.trigger_time() < current_datetime:\n current_datetime = current_datetime + MarketCloseEvent.trigger_time()\n\n # If the current_datetime represents Saturday or Sunday, shift it to last Friday\n if current_datetime.weekday() in (5, 6):\n current_datetime = current_datetime - RelativeDelta(weekday=4, weeks=1)\n\n # The time range denotes the current_datetime +- time delta related to the given frequency. The current price is\n # represented as the close price of (time_range_start, current_datetime) range, labeled using the time_range_\n # start value in most of the cases.\n #\n # The only exception is the price at the market open - in this case we do not have the bar directly\n # leading up to market open time. Thus, the open price from the time range (current_datetime, time_range_end)\n # is used to denote the price.\n\n time_range_start = current_datetime - frequency.time_delta()\n time_range_end = current_datetime + frequency.time_delta()\n\n # The start date is used to download older data, in case if there is no price available currently and we are\n # interested in the last available one. Therefore, at first we look one hour in the past. If this amount of data\n # would not be sufficient, we would look up to a few days in the past.\n\n download_start_date = current_datetime - Frequency.MIN_60.time_delta()\n\n def download_prices(start_time, end_time, multiple_days=False):\n # Function which downloads prices for the given tickers. 
In case if the time range spans over multiple days\n # and thus contains at least one Market Open Event, combine the Open price for the first bar after the\n # market open with the Close prices for all other bars from this day.\n if multiple_days:\n price_fields = [PriceField.Open, PriceField.Close]\n prices = self.data_provider.get_price(tickers, price_fields, start_time,\n end_time, frequency)\n return self._data_array_to_dataframe(prices, frequency)\n else:\n return self.data_provider.get_price(tickers, PriceField.Close, start_time,\n end_time, frequency)\n\n # If the data contains the Market Open Price, merge the prices\n if download_start_date <= MarketOpenEvent.trigger_time() + time_range_end <= time_range_end:\n contains_market_open = True\n elif download_start_date <= MarketOpenEvent.trigger_time() + download_start_date <= time_range_end:\n contains_market_open = True\n elif (time_range_end - download_start_date) > timedelta(days=1):\n contains_market_open = True\n else:\n contains_market_open = False\n\n prices_data_array = download_prices(download_start_date, time_range_end, contains_market_open)\n\n # Access the price bar starting at time_range_start and ending at current_datetime\n try:\n prices_series = prices_data_array.asof(time_range_start)\n prices_series.name = \"Last available asset prices\"\n\n if prices_series.isnull().values.any():\n # If any of the values is null, download more data, using a longer period of time\n raise IndexError\n\n except IndexError:\n # Download data using a longer period of time. In case of Monday or Tuesday, we download data from last 4\n # days in order to handle situations, were there was no price on Monday or Friday (and during the weekend).\n # In all other cases, we download data from the last 2 days.\n number_of_days_to_go_back = 2 if download_start_date.weekday() not in (0, 1) else 4\n prices_data_array = download_prices(download_start_date - RelativeDelta(days=number_of_days_to_go_back),\n time_range_end,\n multiple_days=True)\n\n prices_series = prices_data_array.asof(time_range_start)\n prices_series.name = \"Last available asset prices\"\n\n prices_series = cast_series(prices_series, QFSeries)\n if was_single_ticker_provided:\n return prices_series[0]\n else:\n return prices_series", "def last_candle(self):\r\n if self.length() > 0:\r\n return self.candles[0]\r\n else:\r\n return None", "def get_last_price(args):\n\tmarket = get_market(args)\n\trequest = api.get_ticker(market)\n\tif not request['message']:\n\t\tlast = str(request['result']['Last'])\n\t\treturn (last)\n\telse:\n\t\tprint(request['message'])\n\t\tsys.exit(0)", "def get_latest_bar(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data\")\n raise\n else:\n return bars_list[-1]", "def closed_at(self):\n return string_to_datetime(self._dict.get('closed_at'))", "def get_latest_trading_date(date, url, service_key):\n holidays = get_holidays(date.year, url, service_key)\n holidays.append(datetime.datetime(year=date.year, month=12, day=31))\n holidays = tuple(holidays)\n while date.weekday() in (5, 6) or date in holidays:\n # 0:MON, 1:TUE, 2:WED, 3:THU, 4:FRI, 5:SAT, 6:SUN\n date = date - datetime.timedelta(days=1)\n return date", "def get_last_timestamp(self, symbol):\n if symbol in self.symbol:\n timestamp = self.symbol[symbol][\"timestamp\"]\n return timestamp\n else:\n print(\"Timestamp for symbol {} is not available from {}.\".format(symbol,\n self.__class__.__name__))\n return 
None" ]
[ "0.7029767", "0.68433356", "0.676066", "0.65362865", "0.63972265", "0.62123173", "0.6211086", "0.60124016", "0.59920776", "0.59762216", "0.5918298", "0.58333004", "0.5784651", "0.5730271", "0.5675742", "0.5673005", "0.56588376", "0.56233525", "0.56153977", "0.5592313", "0.5549718", "0.55486995", "0.55048454", "0.54865706", "0.548215", "0.546547", "0.5456136", "0.54502535", "0.5409637", "0.53495395" ]
0.8209045
0
Checks to see if the market is open today. Uses the pandas_market_calendars package, imported as mcal
def check_market_status(): # today = datetime.datetime.now(pytz.timezone('America/New_York')).date() today_utc = pd.to_datetime('now').date() ndq = mcal.get_calendar('NASDAQ') open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc) if today_utc in open_days.index: return open_days else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_market_status():\n today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)\n if today_ny.date() in open_days.index:\n return open_days\n else:\n return None", "def marketOpen():\n tz = conf['GLOBAL']['timezone']\n today = datetime.today().astimezone(pytz.timezone(tz))\n today_fmt = today.strftime('%Y-%m-%d')\n tdHoursURL = conf['TD']['hoursURL']\n key = conf['TD']['key']\n params = {\n 'apikey': key,\n 'date': today_fmt\n }\n\n request = requests.get(\n url=tdHoursURL,\n params=params\n ).json()\n \n \n if request['equity']['EQ']['isOpen'] is True:\n return(True)\n else:\n return(False)", "def isMarketOpen(self):\n if not self.normalDay:\n return False\n now = datetime.now()\n if now.hour >= 9 and now.hour < 16:\n if now.hour == 9 and now.minute < 30:\n return False\n return True\n return False", "def check_if_open(bursa: pd.DataFrame, exchange: str) -> bool:\n exchange = exchange.upper()\n if exchange in bursa.index.values:\n tz = bursa.loc[exchange][\"timezone\"]\n exchange_df = bursa.loc[exchange]\n elif exchange in bursa[\"short_name\"].values:\n tz = bursa.loc[bursa[\"short_name\"] == exchange][\"timezone\"].values[0]\n exchange_df = bursa.loc[bursa[\"short_name\"] == exchange]\n exchange_df = exchange_df.iloc[0].transpose()\n utcmoment_naive = datetime.utcnow()\n utcmoment = utcmoment_naive.replace(tzinfo=pytz.utc)\n local_datetime = utcmoment.astimezone(pytz.timezone(tz))\n market_open = datetime.strptime(exchange_df[\"market_open\"], \"%H:%M:%S\")\n market_close = datetime.strptime(exchange_df[\"market_close\"], \"%H:%M:%S\")\n after_market_open = local_datetime.time() >= market_open.time()\n before_market_close = local_datetime.time() <= market_close.time()\n try:\n lunchbreak_start = datetime.strptime(\n exchange_df[\"lunchbreak_start\"], \"%H:%M:%S\"\n )\n lunchbreak_end = datetime.strptime(exchange_df[\"lunchbreak_end\"], \"%H:%M:%S\")\n\n after_lunch_start = local_datetime.time() >= lunchbreak_start.time()\n before_lunch_end = local_datetime.time() <= lunchbreak_end.time()\n except Exception:\n after_lunch_start = False\n before_lunch_end = False\n\n if local_datetime.weekday() >= 5:\n result = False\n else:\n result = (\n after_market_open\n and before_market_close\n and not (after_lunch_start and before_lunch_end)\n )\n\n return result", "def checkToday(ctx, cron):\n daze = ctx.obj['daze']\n if cron is not None:\n if date.today() in daze.dateDict.keys():\n sys.exit(1)\n else:\n sys.exit(0)\n click.echo(date.today() in daze.dateDict.keys())\n return date.today() in daze.dateDict.keys()", "def is_opening(self):\n now = timezone.now()\n return self.start_date.date() >= now.date()", "def office_is_open_on_date(iso_date):\n d_time = datetime.fromisoformat(iso_date)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n return schedule != {}", "def today(self) -> bool:\n return self._algorithm.can_study_now(self._stat)", "def updateToday(tradingDay):\n if date.today() != tradingDay.today:\n tradingDay = TradingDay(tradingDay.contractDetails)\n\n if tradingDay.isMarketOpen():\n if not tradingDay.marketOpen:\n tradingDay.marketOpen = True\n console().info(\"The Market Has Opened\")\n else:\n if tradingDay.marketOpen:\n tradingDay.marketOpen = False\n console().info(\"The Market Has Closed\")\n return tradingDay", "def test_date_accept_today(self):\n 
spi_search = \"find date today\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def is_today(self, dt: datetime.datetime) -> bool:\n\n if self is Day.DAILY:\n return True\n day = dt.weekday()\n if self is Day.WEEKDAY:\n return day < 5\n if self is Day.WEEKEND:\n return day >= 5\n return Day(day) == self", "def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()", "def isNormalTradingDay(self):\n days = self.contractDetails.tradingHours.split(\";\")\n dateString = self.today.strftime(\"%Y%m%d\")\n today = [x for x in days if x.split(\":\")[0] == dateString]\n if not today:\n console().error(\"Missing Contract Market Hours for Today.\")\n hours = today[0].split(\":\")[1]\n if hours == \"CLOSED\" or hours != config.NORMAL_TRADING_HOURS:\n return False\n return True", "def test_busy_day_output_correct(price_data):\n volume_df = price_data\n res = c.calculate_busy_day(price_data)\n\n volume_df = volume_df.loc[(volume_df['ticker'] == 'MSFT')]\n avg_volume = volume_df['volume'].sum() / len(volume_df)\n volume_df['high_volume'] = volume_df['volume'] > 1.1 * avg_volume\n busy_days = volume_df.loc[volume_df['high_volume'] == True]\n\n assert (sorted(res.loc[res['ticker'] == 'MSFT'].date.unique() ==\n busy_days.date.unique()))", "def is_no_bell_day():\n today = time.localtime()[:3]\n for r in no_bell:\n if today >= r[0] and today <= r[1]:\n return True\n return False", "def check(self):\n validity_year = int(self.date[0:4])\n validity_month = int(self.date[5:7])\n validity_day = int(self.date[8:10])\n if datetime.today().year > validity_year:\n self.flag = False\n elif datetime.today().year == validity_year:\n if datetime.today().month > validity_month:\n self.flag = False\n elif datetime.today().month == validity_month:\n if datetime.today().day > validity_day:\n self.flag = False\n else:\n self.flag = True\n else:\n self.flag = True\n else:\n self.flag = True", "def availability_exists_today(self):\n\n availability_exists = False\n\n for avail in self.availabilities:\n\n # The availability is free, so one slot exists on this day\n if avail.is_free():\n availability_exists = True\n\n break\n\n return availability_exists", "def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time", "def isCurrentDay(self):\n t = time()\n gmt = safegmtime(t + _tzoffset(self._tz, t))\n return (gmt[0] == self._year and gmt[1] == self._month and\n gmt[2] == self._day)", "def contact_now(date: str) -> bool:\n\n time_date = string_to_datetime(date)\n return date_is_today(time_date) or date_is_in_past(time_date)", "def is_open_for_betting(self):\n return self.is_open", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n 
\"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def chky(uid):\n\tx = db.checkins_today(uid)\n\tif x == None: return False\n\telse: return True", "def check_OOS_last_day(df: pandas.core.frame.DataFrame, date: datetime):\n last_day = df['Date'].max()\n if date == last_day:\n return 1\n else:\n return 0", "def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False", "def close_position(self, symbol):\n #Need to add a check for intraday_quantity, if not 0 then was bought today\n #Need to check if there are any pending orders for any options before determining quantity\n df = self.get_option_positions(symbol=symbol)\n if df.shape[0] == 0:\n return True\n exp = sorted({exp: len(list(df[df['expiration_date'] == exp]['type'].unique())) for exp in list(df['expiration_date'].unique())}.items(),key=lambda x: (x[1],x[0]))[0]\n df = df[df['expiration_date']==exp[0]].sort_values(by=['quantity','type','mark_price'], ascending=[False,False,True]) if exp[1] > 1 else df\n option = list(df['option'].unique())[0] if exp[1] > 1 else list(df[df['expiration_date']==exp[0]]['option'].unique())[0]\n self.close_option(symbol, option[:-1].split('/')[-1], 'sell' if exp[1] == 1 or get_long_short_difference(df) > 0 else 'buy', 'close', max_quantity=get_max_quantity(df, option, exp[1]))\n self.close_position(symbol)", "def will_occur(self, now):\n return self.end_repeat is None or self.end_repeat >= now.date() or \\\n self.l_start_date >= now or self.l_end_date >= now", "def checkOpenStatus(self):\n xl = win32.gencache.EnsureDispatch('Excel.Application')\n update_file = \"BoardData_update.csv\"\n update_file_pth = os.path.join(self.current_dir, 'src', update_file)\n\n if xl.Workbooks.Count > 0:\n print(\"opened: \", xl.Workbooks.Count)\n\n # if none of opened workbooks matches the name, openes my_workbook\n if any(i.Name == update_file for i in xl.Workbooks):\n print(\"It is opended\")\n xl.Workbooks.Open(Filename=update_file_pth).Close(True)\n\n else:\n print(\"It is not opended\")", "def test_is_payday_positive0(self):\n date_to_check = date_class(2018,1,12)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2018,2,23)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2018,11,16)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2018,12,28)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True" ]
[ "0.7893472", "0.7299794", "0.71417683", "0.64564335", "0.62261474", "0.6127471", "0.6050444", "0.596668", "0.57297295", "0.5713947", "0.5685722", "0.5678899", "0.5610287", "0.55595165", "0.54880613", "0.5486532", "0.5417402", "0.540892", "0.53961575", "0.53872633", "0.5376947", "0.5373459", "0.5333964", "0.53271675", "0.5307858", "0.5302999", "0.5299542", "0.5288933", "0.52879757", "0.5278832" ]
0.7844019
1
If return_headers is True, just returns the column names. If update_small_file is True, just updates the small file that starts on 1/1/2000
def update_all_stocks(return_headers=False, update_small_file=False): # 7-13-2017: 28788363 rows in full df zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/download?api_key=' + \ Q_KEY + '&download_type=partial' r = req.get(zip_file_url) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(path='../stockdata/') if return_headers: df = pd.read_csv('../stockdata/' + z.filelist[0].filename, parse_dates=True) df.set_index('Date', inplace=True) new_c = [re.sub('.\s', '_', c) for c in df.columns] return new_c df = pd.read_csv('../stockdata/' + z.filelist[0].filename) # it won't parse dates when it reads... df['Date'] = pd.to_datetime(df['Date']) df.set_index('Date', inplace=True) # fix problem with . and _ in Adjusted cols new_c = [re.sub('.\s', '_', c) for c in df.columns] df.columns = new_c full_df = pd.read_csv('../stockdata/all_stocks.csv.gzip', parse_dates=True, compression='gzip', index_col=0) if (full_df.columns == df.columns).mean() != 1: print('WARNING! Columns in the full df do not match the updated df columns.') print('full_df cols:') print(full_df.columns) print('') print('update df cols:') print(df.columns) print('') print('aborting and returning current full_df') return full_df if df.index.max() > full_df.index.max(): df.to_csv('../stockdata/all_stocks.csv.gzip', mode='a', compression='gzip') dtypes = ['object'] + ['float64'] * 10 full_df = pd.read_csv('../stockdata/all_stocks.csv.gzip', parse_dates=True, compression='gzip', index_col=0, dtype=dtypes) os.remove('../stockdata/' + z.filelist[0].filename) return full_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_headers(headers, out):\r\n out.write(common.to_csv_line(headers, \"efficient\"))", "def reformat_csv_header(self, path, train_file, test_file):\n\n \"\"\"\n \"id\",\"comment_text\",\"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"\n \"\"\"\n\n train = pd.read_csv(os.path.join(path, train_file))\n test = pd.read_csv(os.path.join(path, test_file))\n train = train.drop('id', axis=1)\n test = test.drop('id', axis=1)\n for label in [\"jobflag\"]:\n test[label] = pd.Series(0, index=test.index)\n temp_path = os.path.join(path, \"temp\")\n if not os.path.isdir(temp_path):\n os.mkdir(temp_path)\n train.to_csv(os.path.join(temp_path, train_file),\n index=False, header=False)\n test.to_csv(os.path.join(temp_path, test_file),\n index=False, header=False)\n return temp_path", "def _download_table_kernel(self, sql, fp, header={}, overwrite=False):\n\t\tfn = ntpath.basename(fp)\n\n\t\tif not os.path.isfile(fp) or overwrite:\n\t\t\tprint((\"[hscobj] querying table {} from HSC\".format(fn)))\n\n\t\t\thscsspquery.hscSspQuery_retry(n_trials=20, sql=sql, filename_out=fp, release_version=self.data_release)\n\n\t\t\tif os.path.isfile(fp) and (os.stat(fp).st_size > 0):\n\t\t\t\tprint(\"[hscobj] successful\")\n\n\t\t\t\tif len(header) > 0:\n\t\t\t\t\t_add_header_columns_to_table(fp, header)\n\n\t\t\t\tstatus = True\n\n\t\t\telse: \n\t\t\t\tprint((\"[hscobj] querying table {} from HSC failed\".format(fn)))\n\t\t\t\tif os.path.isfile(fp):\n\t\t\t\t\tos.remove(fp)\n\t\t\t\tstatus = False\n\n\t\telse:\n\t\t\tprint((\"[hscobj] skip querying table {} from HSC as file exists\".format(fn)))\n\t\t\tstatus = True\n\n\t\treturn status", "def write_shortfile_table(self):\n\n # KMEL actually removes duplicate short filenames from this\n # table.\n\n start_of_shortfiles = self.db_file.tell()\n\n shortfiles = {}\n for miEntry in self.mainIndex:\n short_filename = miEntry.encodedShortfile\n if short_filename in shortfiles:\n miEntry.set_shortfile_offset(\n shortfiles[short_filename])\n else:\n shortfiles[short_filename] = \\\n self.db_file.tell() - start_of_shortfiles\n\n miEntry.set_shortfile_offset(\n shortfiles[short_filename])\n self.db_file.write(short_filename)", "def update_header():\n print_debug_info()\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not (has_header() or suffix_is_supported()):\n # This file do not have a header, or it's format is unknown, quit.\n debug(\"cannot add header to a script of unknown format.\")\n return\n\n # if current buffer is not modified, do not bother to update it's date.\n if not modified():\n debug(\"Buffer not modified, just quit\")\n return\n\n row, column = vim.current.window.cursor\n header_template = globals().get(\"%s_header\" % SUFFIX).rstrip()\n\n # if line has the keyword, find the current for the keyword, get the line, re-render it and fill it in.\n head = CURRENT_BUFFER[:10]\n\n more_updates = vim.eval(\"g:BHUpdates\")\n\n update = {\n 'Maintained by': AUTHOR,\n 'Modified by': AUTHOR,\n 'Last modified': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'Filename': FILENAME,\n }\n update.update(more_updates)\n for index, line in enumerate(head):\n for keyword in update:\n if line.find(keyword) != -1:\n original_line = [_line for _line in header_template.splitlines() if _line.find(keyword) != -1]\n if original_line:\n original_line = original_line[0]\n else:\n continue\n debug(\"original line: %s\" % original_line)\n debug(\"line to be replaced: %s\" % line)\n rendered_line = original_line % 
{KEYWORDS[keyword]: update[keyword]}\n debug(\"rendered line: %s\" % rendered_line)\n CURRENT_BUFFER[index] = rendered_line\n\n vim.current.window.cursor = (row, column)", "def update_metadata_csv(self, source):\n timestamp = os.path.getmtime(source)\n filedate = datetime.datetime.fromtimestamp(timestamp)\n return self.update_metadata_date(filedate)", "def update_model_output(self):\n warnings.warn(\"Please ensure that the column names of the new file accurately corresponds to the relevant column names in the exisitng file\")\n column_names_new = self.new_data.head()\n column_names_old = self.existing_data.head()\n for column_name in column_names_new:\n if column_name in column_names_old:\n self.existing_data[column_name] = self.new_data[column_name]\n \n self.existing_data.to_csv(filename_main, index = False)", "def fix_headers(hParams,testMode=False):\n \n \n fileList = glob.glob(hParams['fileList'])\n for oneFile in fileList:\n with fits.open(oneFile,'update') as HDUList_orig:\n if testMode == True:\n print(\"Doing a dry run without modifying headers\")\n HDUList = fits.HDUList([fits.PrimaryHDU(None,header=HDUList_orig[0].header)])\n primHead = HDUList[0].header\n else:\n primHead = HDUList_orig[0].header\n\n colcorner = hParams['COLCORNR'][primHead['SCA_ID']]\n rowcorner = hParams['ROWCORNR'][primHead['SCA_ID']]\n \n detTiming = pynrc.pynrc_core.DetectorOps(detector=481,\n wind_mode=hParams['wind_mode'],\n xpix=hParams['xpix'],\n ypix=hParams['ypix'],\n x0=colcorner-1,\n y0=rowcorner-1,\n nint=hParams['nint'],\n ngroup=hParams['ngroup'],\n nf=hParams['nf'])\n correctHead = detTiming.make_header()\n\n obsId = primHead['OBS_ID']\n if obsId in hParams['expStart'].keys():\n expStart = hParams['expStart'][obsId]\n date, time = expStart.split('T')\n primHead['DATE-OBS'] = date\n primHead['TIME-OBS'] = time\n \n t_expStart = Time(expStart)\n t_expEnd = t_expStart + correctHead['EXPTIME'] * u.second\n expEnd = t_expEnd.fits\n date, time = expEnd.split('T')\n primHead['DATE-END'] = date\n primHead['TIME-END'] = time\n else:\n print(\"Couldn't find exp start for {}\".format(obsId))\n \n\n for oneKey in ['TFRAME','TGROUP','INTTIME','EXPTIME',\n 'TREFROW','BREFROW','LREFCOL','RREFCOL',\n 'COLCORNR','ROWCORNR']:\n primHead[oneKey] = correctHead[oneKey]\n \n if hParams['wind_mode'] == 'WINDOW':\n primHead['HWINMODE'] = 'ENABLE'\n else:\n primHead['HWINMODE'] = 'DISABLE'\n primHead['DETECTOR'] = detectorDict[primHead['SCA_ID']]\n \n primHead['TLDYNEID'] = hParams['teledyneID'][primHead['SCA_ID']]\n if testMode == True:\n pdb.set_trace()", "def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))", "def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n testfile=obs.out_filename(\"events\", format=informat, dir=indir)\n try:\n table = Table.read(str(testfile), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \"+str(filename)\n continue\n #for filetype in 
['events', 'aeff', 'edisp', 'psf_3gauss']:\n #for filetype in ['events']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n filename = obs.out_filename(filetype, format=informat, dir=indir)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(outfile))\n table.write(str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n events_filename = Path(indir) / obs.filename('events', format=informat)\n try:\n table = Table.read(str(events_filename), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \" + str(events_filename)\n continue\n if table.meta[\"OBS_ID\"]!=obs.obs_id:\n continue\n # for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n # for filetype in ['events']:\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_table']:\n filename = Path(indir) / obs.filename(filetype, format=informat)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n elif filetype in ('psf_table'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = 
HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(\n os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(indir + \"/\" + str(outfile)))\n table.write(indir + \"/\" + str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(indir + \"/\" + str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def format_report_header(self):", "def process_header_data(spark, input_dir, output):\n\theader = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__header_*__*.csv\") \\\n\t\t.select(*header_cols) \\\n\t\t.where(col('identifier').isNotNull())\n\n\tbill = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__billgen_*__*.csv\") \\\n\t\t.select(*bill_cols)\n\n\theader_full = header.join(bill, ['identifier'], how='left')\n\n\theader_full.repartition(1).write.mode('overwrite').format(\"csv\") \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.save(f\"{output}/header/\")", "def setheaders(f):\n f.headers['OBSERVER'] = \"'%s'\" % camera.status.observer\n f.headers['FILTERID'] = \"'%s'\" % filtname(camera.status.filter)\n f.headers['FILTER'] = \"%1d\" % camera.status.filter\n f.headers['XYSTAGE'] = \"'%d,%d'\" % camera.status.guider\n f.headers['MIRROR'] = \"'%s'\" % camera.status.mirror\n if camera.status.imgtype == 'BIAS':\n f.headers['BIAS'] = camera.status.object\n elif camera.status.imgtype == 'DARK':\n f.headers['DARK'] = camera.status.object\n else:\n f.headers['OBJECT'] = camera.status.object\n try:\n skytemp = weather.status.skytemp\n f.headers['SKYTEMP'] = \"%4.1f\" % skytemp\n f.comments['SKYTEMP'] = \"'Infrared sky temp in degC'\"\n except:\n pass\n\n try:\n if not camera.status.TJ.current.posviolate: #Position calibrated to epoch\n ra = camera.status.TJ.current.Ra/15/3600\n dec = camera.status.TJ.current.Dec/3600\n epoch = camera.status.TJ.current.Epoch\n alt = camera.status.TJ.current.Alt\n GotTJ = True\n elif camera.status.TJ.current.RaC:\n ra = camera.status.TJ.current.RaC\n dec = camera.status.TJ.current.DecC\n alt = 
camera.status.TJ.current.Alt\n t = time.gmtime()\n epoch = t.tm_year + (t.tm_yday/366.0)\n GotTJ = True\n else:\n GotTJ = False\n except AttributeError:\n GotTJ = False \n if GotTJ:\n f.headers['RA_OBJ'] = \"%12.9f\" % (ra*15.0)\n f.headers['RA'] = \"'%s'\" % sexstring(ra)\n f.headers['DEC_OBJ'] = \"%13.9f\" % dec\n f.headers['DEC'] = \"'%s'\" % sexstring(dec)\n f.headers['EQUINOX'] = \"%6.1f\" % epoch\n f.headers['SECZ'] = \"%6.3f\" % (1/math.cos((90-alt)*math.pi/180))\n if GotFT:\n hjd,message = fitstime.findtime(fimage=f, verbose=0, allfields=0)\n if type(hjd) == float:\n f.headers['HJD'] = \"%f\" % hjd\n f.comments['HJD'] = \"Heliocentric Julian Day at exposure midpoint\"", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0", "def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):\n\t\t\tDB.set_header(lima[i], data[i])\n\t\tDB.close()\n\t\t#for i in range(len(lima)):\n\t\t#\tdata[i].write_image(filename, lima[i])\n\telif ftp == \"hdf\":\n\t\tfor i in range(len(lima)):\n\t\t\tdata[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def check_headers(df, filename):\n print(\"Checking headers for: \" + filename)\n read_message = \"\"\n\n original_colnames = df.columns.tolist()\n # good_colnames = [\"Marker\",\"Chr\",\"Position\",\"Effect_allele\",\"Other_allele\",\"Beta\",\"SE\",\"Pval\",\"EAF\",\"N\",\"Imputed\",\"Info\",\"Information_type\"]\n\n # Before actually checking the contents header, are there even headers?\n passed = False\n for col in original_colnames:\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n passed = True\n\n # Fail the check if the name column is not found, this is to stop the checks if there is a file without a header\n if not passed:\n # First check whether this is one of the files of Malik, where the columns were missing\n if filename.split('/')[-1].startswith('INTERSTROKE'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"coded_all\", \"noncoded_all\", \"strand_genome\", \"beta\", \"SE\", \"pval\", \"AF_coded_all\", \"n_cases\", \"n_controls\", \"imputed\", \"oevar_imp\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n elif filename.split('/')[-1].startswith('ASGC'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n 
df.columns = [\"SNPID\", \"chr\", \"position\", \"n_cases\", \"n_controls\", \"coded_all\", \"noncoded_all\", \"AF_coded_all\", \"beta\", \"SE\", \"pval\", \"imputed\", \"info\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n else:\n # print(\"Something went wrong for \" + filename)\n # print(\"Please make sure there are headers in the file and that there is a name/id/marker column\")\n return df, \"NAMECOLCHECK;FAILED\"\n \n # Variable to hold all unknown columns\n unknown_cols = []\n\n # Loop over al colnames and rename it\n for index,col in enumerate(original_colnames):\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n original_colnames[index] = \"Marker\"\n\n elif col.lower().strip() in [\"chromosome\", \"chr\", \"chrom\"]:\n original_colnames[index] = \"Chr\"\n\n elif col.lower().strip() in [\"pos\", \"position\", \"bp\"]:\n original_colnames[index] = \"Position\"\n\n elif col.lower().strip() in [\"effallele\", \"eff_allele\", \"effectallele\", \"effect_allele\", \"coded_all\", \"codedall\", \"allele1\"]:\n original_colnames[index] = \"Effect_allele\"\n\n elif col.lower().strip() in [\"noneffallele\", \"noneff_allele\", \"noneffectallele\", \"noneffect_allele\", \"non_coded_all\", \"noncoded_all\", \"noncodedall\", \"other_allele\", \"otherallele\", \"allele2\"]:\n original_colnames[index] = \"Other_allele\"\n\n elif col.lower().strip() in [\"beta\"]:\n original_colnames[index] = \"Beta\"\n\n elif col.lower().strip() in [\"se\", \"sebeta\", \"stderr\"]:\n original_colnames[index] = \"SE\"\n\n elif col.lower().strip() in [\"p\", \"pval\", \"p-value\"]:\n original_colnames[index] = \"Pval\"\n\n elif col.lower().strip() in [\"eaf\", \"freq1\", \"af_coded_all\", \"effallelefreq\"]:\n original_colnames[index] = \"EAF\"\n\n elif col.lower().strip() in [\"n\", \"ntot\", \"n_total\"]:\n original_colnames[index] = \"N\"\n\n elif col.lower().strip() in [\"ncase\", \"ncases\", \"n_case\", \"n_cases\"]:\n original_colnames[index] = \"N_cases\"\n\n elif col.lower().strip() in [\"ncontrol\", \"ncontrols\", \"n_control\", \"n_controls\"]:\n original_colnames[index] = \"N_controls\"\n\n elif col.lower().strip() in [\"imputed\", \"imp\"]:\n original_colnames[index] = \"Imputed\"\n\n elif col.lower().strip() in [\"inf\", \"info\", \"info_rsq\", \"rsqr\"]:\n original_colnames[index] = \"Info\"\n\n elif col.lower().strip() in [\"inf_type\", \"information_type\"]:\n original_colnames[index] = \"Information_type\"\n\n # Not neccesary for the toolkit, but reduce the error messages\n elif col.lower().strip() in [\"strand\", \"strand_genome\"]:\n original_colnames[index] = \"Strand\"\n\n elif col.lower().strip() in [\"oevar_imp\"]:\n original_colnames[index] = \"oevar_imp\"\n\n elif col.lower().strip() in [\"pval.t\"]:\n original_colnames[index] = \"pval.t\"\n\n elif col.lower().strip() in [\"df.t\"]:\n original_colnames[index] = \"df.t\"\n\n elif col.lower().strip() in [\"approxdf\"]:\n original_colnames[index] = \"approxdf\"\n\n elif col.lower().strip() in [\"or\"]:\n original_colnames[index] = \"OR\"\n\n else:\n # print(\"Could not match the string: \" + col)\n # print(\"Please make sure this column is handled correctly in the toolkit\")\n unknown_cols.append(col)\n\n # Change column names\n df.columns = original_colnames\n\n # Write the unknown columns into the fail_reason variable\n if len(unknown_cols) > 0:\n read_message = read_message + \"NAMECOLCHECK;PASSED\" + \" UNRECOGNIZED;\" + ' 
'.join([str(elem) for elem in unknown_cols])\n else:\n read_message = read_message + \"NAMECOLCHECK;PASSED\"\n\n return df, read_message", "def _update_headers(self):\n if not self._header_updated:\n headers = self.head_obj(self._client, self._spec)\n self._headers.update(headers)\n self._header_updated = True", "def _configure_bintable_header(new_header, table_headers):\n\n # Using a single header to get the column descriptions\n column_info = {}\n for kwd in table_headers[0]:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = table_headers[0][kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n cards = []\n for att in ['TTYPE', 'TFORM', 'TUNIT', 'TDISP', 'TDIM']:\n try:\n cards.append(table_headers[0].cards[att+num])\n except KeyError:\n pass # if we don't have info for this keyword, just skip it\n \n column_info[colname] = (num, cards)\n\n # Adding column descriptions and additional info\n for kwd in new_header:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = new_header[kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n info_row = column_info.get(colname)\n if not info_row:\n new_header.comments[kwd] = 'column name'\n new_header.comments[kwd.replace(\"TTYPE\", \"TFORM\")] = 'column format'\n continue\n \n info_num = info_row[0]\n cards = info_row[1]\n \n for key, val, desc in cards:\n key_new = key.replace(info_num, num)\n try:\n ext_card = new_header.cards[key_new]\n \n if ext_card[1]:\n val = ext_card[1]\n if ext_card[2]:\n desc = ext_card[2]\n \n new_header[key_new] = (val, desc)\n except KeyError: # card does not already exist, just add new one\n new_header.set(key_new, val, desc, after=kwd)\n\n # Adding any additional keywords from the original cutout headers\n shared_keywords = _combine_headers(table_headers, constant_only=True)\n for kwd in shared_keywords:\n if kwd in new_header: # Don't overwrite anything already there\n continue\n\n if any(x in kwd for x in [\"WCA\", \"WCS\", \"CTY\", \"CRP\", \"CRV\", \"CUN\",\n \"CDL\", \"11PC\", \"12PC\", \"21PC\", \"22PC\"]): # Skipping column WCS keywords\n continue\n\n new_header.append(shared_keywords.cards[kwd])", "def getHeaderDict(self):\r\n #put the headers into a dict\r\n \r\n print(\"opening \",self.filename)\r\n with open(self.filename, 'r') as readfile:\r\n headers = readfile.readline()\r\n firstrow = readfile.readline()\r\n if not firstrow:\r\n print(\"first line after headers is blank\")\r\n self.loadDictRow(keystring=headers)\r\n else: #assume first row after headers is test router\r\n print(\"load test router row\") \r\n self.loadDictRow(keystring = headers, valuestring = firstrow) \r\n \r\n # check for headers\r\n miscount=0\r\n for key in self.dataheader:\r\n if not key in self.objdict:\r\n print(\"missing key !\", key)\r\n miscount += 1\r\n\r\n if miscount == 0:\r\n print(\"all Columns found. Thank you.\")\r\n # elif (miscount == 11) and (\"IPADDRESS\" in ):\r\n # print(\"Found IP Address column. program will add additional columns\")\r\n elif miscount > 11:\r\n print(\"Could not locate Header Row\")\r\n elif miscount > 0:\r\n print(\"some columns missing, will add additional columns\")\r\n \r\n \r\n #end file check on filename \r", "def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. 
of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"", "def _write_all_headers(unit, fobj):\n\n now = datetime.datetime.now()\n if unit.jump_speed:\n jump_speed = '%.2f' % unit.jump_speed\n else:\n jump_speed = None\n\n # Case and order is significant.\n header_map = (\n ('Name', unit.name),\n ('Reference', unit.reference),\n ('Type', unit.unit_type),\n ('Unit_Era', unit.unit_era),\n ('Unit_TRO', unit.unit_tro),\n ('Move_Type', unit.unit_move_type),\n ('Tons', unit.weight),\n ('Comment', \"Saved by: btmux_maplib_io(Python) at %s\" % now.ctime()),\n ('Computer', 4),\n ('Radio', 5),\n ('Heat_Sinks', unit.heatsink_total),\n ('Mech_BV', unit.battle_value),\n ('Cargo_Space', unit.cargo_space),\n ('Max_Suits', unit.battlesuit_total),\n ('Max_Speed', '%.2f' % unit.max_speed),\n ('Jump_Speed', jump_speed),\n ('Specials', ' '.join(list(unit.specials))),\n )\n\n for header_name, header_value in header_map:\n if not header_value:\n continue\n if isinstance(header_value, list):\n header_value = ' '.join(header_value)\n header_str = \"{header_name:<16} {{ {header_value} }}\\n\".format(\n header_name=header_name, header_value=header_value)\n fobj.write(header_str)", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target 
RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def test_full_fasta_headers(self):\r\n convert_fastq(self.fasta_file_path, self.qual_file_path,\r\n full_fasta_headers=True, output_directory=self.output_dir)\r\n\r\n actual_output_file_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '.fastq',\r\n self.output_dir)\r\n\r\n actual_output_file = open(actual_output_file_path)\r\n actual_output = actual_output_file.read()\r\n actual_output_file.close()\r\n self._files_to_remove.append(actual_output_file_path)\r\n\r\n self.assertEquals(actual_output, expected_fastq_full_fasta_headers)", "def get_header_table(self , dt, ds = '' , all_ds = '', length = ''):\n index_low = self.unique_dates[ds]['indices'][dt]['low']\n #index_up = self.unique_dates[best_ds]['indices'][dt]['up'] \n hd = self.data[ds]['header_table'][index_low:index_low+length] \n hd['duplicates'] = all_ds \n \n return hd", "def get_raw_column_names(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n\n return {\n 'status': 'success',\n 'raw_columns': import_file.first_row_columns\n }", "def transfer_header(infl, outfl):\n\n print \"Transfer\", infl, \"to\", outfl\n fin = pyfits.open(infl)\n fout = pyfits.open(outfl, 'update')\n\n dont_transfer = [\"HSTSLAC\", \"MDRIZSKY\", \"LACOSMIC\", \"HISTORY\", \"COMMENT\", \"\"]\n\n print \"Transferring: \",\n for i in range(len(fin)):\n for key in fin[i].header:\n if dont_transfer.count(key) == 0:\n if fin[i].header[key] != fout[i].header.get(key, default = None):\n print key,\n\n fout[i].header[key] = fin[i].header[key]\n fout.flush()\n fout.close()\n fin.close()\n print", "def insert_column_headers_for_outlier_correction(\n self, data_df: pd.DataFrame, new_headers: List[str], filepath: str\n ) -> pd.DataFrame:\n\n if len(new_headers) != len(data_df.columns):\n difference = int(len(data_df.columns) - len(new_headers))\n bp_missing = int(abs(difference) / 3)\n if difference < 0:\n raise DataHeaderError(\n msg=f\"SIMBA ERROR: SimBA expects {len(new_headers)} columns of data inside the files within project_folder/csv/input_csv directory. However, within file {filepath} file, SimBA found {len(data_df.columns)} columns. Thus, there is {abs(difference)} missing data columns in the imported data, which may represent {int(bp_missing)} bodyparts if each body-part has an x, y and p value. Either revise the SimBA project pose-configuration with {bp_missing} less body-part, or include {bp_missing} more body-part in the imported data\"\n )\n else:\n raise DataHeaderError(\n msg=f\"SIMBA ERROR: SimBA expects {len(new_headers)} columns of data inside the files within project_folder/csv/input_csv directory. However, within file {filepath} file, SimBA found {len(data_df.columns)} columns. Thus, there is {abs(difference)} more data columns in the imported data than anticipated, which may represent {int(bp_missing)} bodyparts if each body-part has an x, y and p value. 
Either revise the SimBA project pose-configuration with {bp_missing} more body-part, or include {bp_missing} less body-part in the imported data\"\n )\n else:\n data_df.columns = new_headers\n return data_df", "def get_dataframe_from_merged_csv_files(tables_metadata, debug=False):\n combined_table = None\n for table_metadata in tables_metadata:\n if combined_table is None:\n combined_table = get_normalized_data_table(table_metadata)\n continue\n next_data_table = get_normalized_data_table(table_metadata)\n combined_table = combined_table.join(next_data_table)\n print_data_table_length('combined_table', combined_table.data, debug=debug)\n drop_headers('final_csv', combined_table.data)\n rename_headers('final_csv', combined_table.data)\n return combined_table.data" ]
[ "0.5855104", "0.5634152", "0.5610796", "0.5565353", "0.55391574", "0.553298", "0.54687357", "0.54682636", "0.5437986", "0.54259366", "0.53932583", "0.53861797", "0.53778136", "0.53684855", "0.53658843", "0.5357169", "0.5348249", "0.5340234", "0.5294776", "0.52871776", "0.52544117", "0.52523273", "0.5252284", "0.5245004", "0.52405024", "0.5230003", "0.51954204", "0.51901984", "0.51738304", "0.5169486" ]
0.6717363
0
Retrieves and saves last n days from the full stock dataset for analysis.
def get_last_n_days(n=100):
    df = pd.read_csv('../stockdata/all_stocks.csv.gzip', parse_dates=True)
    dates = sorted(df.index.unique())[-n:]
    new_df = df.loc[dates]
    new_df.to_csv('../stockdata/all_stocks_last' + str(n) + '_days.csv.gzip', compression='gzip')
    return new_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_latest_data(self, symbol='SPY', n=1):\n return self.data.ix[-n:]", "def get_latest_data(self, symbol, n=1):\n raise NotImplementedError", "def fetch_history(self, dname, *args, **kwargs):\n if dname != 'returnsN':\n return super(QuoteFetcher, self).fetch_history(dname, *args, **kwargs)\n\n N, date, backdays = args\n ret = super(QuoteFetcher, self).fetch_history('returns', date, backdays+N-1, **kwargs)\n retN = pd.rolling_apply(ret.fillna(0), N, lambda x: (1+x).cumprod()[-1] - 1.)\n retN[ret.isnull()] = np.nan\n return retN.iloc[N-1:]", "def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices", "def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]", "def get_last_n_observation(self, n: int=1) -> List[np.ndarray]:\n self.connect_db()\n result = []\n c = self.conn.cursor()\n # Get minimum ts and maximum ts from perfs table\n c.execute('SELECT MIN(ts) as mints, MAX(ts) as maxts from perfs')\n # fetch execution into min and max ts\n min_ts, max_ts = c.fetchone()\n while True:\n try:\n # append observation from max_ts\n result.append(self.get_observation(max_ts))\n # check if getting enough observation\n if len(result) == n:\n self.conn.close()\n return result\n except NotEnoughDataError:\n if max_ts == min_ts:\n raise\n # keep subtract max_ts until min_ts\n max_ts -= 1", "def download_stocks(stocklist=STOCKLIST, fresh=False):\n # load stocklist\n with open(stocklist) as f:\n stocks = f.read().strip('\\n').split('\\n')\n\n dfs = {}\n for s in stocks:\n print(s)\n stockfile = '../stockdata/' + s + '.csv.gz'\n if fresh or not os.path.exists(stockfile):\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n continue\n\n else:\n stock = pd.read_csv(stockfile, index_col=0)\n stock.index = pd.to_datetime(stock.index)\n timedelta_step = 1\n if HOUR > 2 and WEEKDAY not in [5, 6]: # for mtn time\n timedelta_step = 0\n elif WEEKDAY == 0: # it's monday\n timedelta_step = 3 # can be up to last friday\n elif WEEKDAY in [5, 6]: # if a weekend, last data is from friday\n timedelta_step = WEEKDAY - 4\n print('date gap:', TODAY.date() - stock.iloc[-2:].index[-1].date())\n print('step, timedelta:', timedelta_step, datetime.timedelta(timedelta_step))\n if (TODAY.date() - stock.iloc[-2:].index[-1].date()) <= 
datetime.timedelta(timedelta_step):\n dfs[s] = stock\n print('latest date close enough to up-to-date:')\n print(stock.iloc[-2:].index[-1].date())\n print('not downloading')\n print('')\n continue\n else:\n print('latest date is')\n print(stock.iloc[-2:].index[-1].date())\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n\n return dfs", "def fetch(self, dname, *args, **kwargs):\n if dname != 'returnsN':\n return super(QuoteFetcher, self).fetch(dname, *args, **kwargs)\n\n N, args = args[0], args[1:]\n if 'backdays' in kwargs:\n kwargs['backdays'] += N-1\n else:\n if len(args) == 3:\n kwargs['backdays'] = args[2] + N-1\n args = args[:2]\n else:\n kwargs['backdays'] = N-1\n ret = super(QuoteFetcher, self).fetch('returns', *args, **kwargs)\n retN = pd.rolling_apply(ret.fillna(0), N, lambda x: (1+x).cumprod()[-1] - 1.)\n retN[ret.isnull()] = np.nan\n return retN.iloc[N-1:]", "def get_back_data(self, end_date=None, stocks=None):\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if stocks is None:\n stocks = self.stocks\n\n info = {}\n for stock in stocks:\n info[stock] = self.stock_data[stock].to_stock_data_range(start_date=None, end_date=end_date)\n\n return info", "def store_stock_data(stock_name = 'TSLA'):\n stonk = yf.Ticker(stock_name) # gets stock data from yahoo\n hist = stonk.history(period=\"max\") # historical stock prices\n hist.reset_index(inplace=True) # takes the date stamp out of the index column\n hist.rename(columns = {'Date':\"DateTime\"},inplace=True) # Changes the name of the date column\n hist['DateTime'] = pd.to_datetime(hist['DateTime'],utc=True) # Changes the timestamps to UTC\n hist.to_csv('../data/raw/'+stock_name+'_stock_price.csv')\n return", "def giveHistoricalData(stockName):\n now = datetime.datetime.fromtimestamp(getTime())\n fiveDaysAgo = datetime.datetime.fromtimestamp(\n getTime() - daysToSeconds(5)\n )\n\n resp = json.dumps(\n getHistoricalData(stockName, fiveDaysAgo)\n )\n return resp", "def last_days_results(self, days):\n return self.security['Date', 'Close', 'FinalDecision'][-days:]", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. 
\"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df", "def get_full_history(symbol):\n to_date = int(datetime.datetime.timestamp(datetime.datetime.now()))\n from_date = int(datetime.datetime.timestamp(datetime.datetime(1990, 1, 1, 1, 0, 0)))\n url_base = \"https://query1.finance.yahoo.com/v7/finance/download/\"\n url_params = f\"{symbol}.NS?period1={from_date}&period2={to_date}&interval=1d&events=history\"\n resp = requests.get(url_base + url_params)\n a = csv_to_list(resp)[1:]\n return create_price(symbol, a)", "def get_nse_history(symbol, start, end):\n fetch_from_nse = False\n from_date = None\n\n start_date = dt.strptime(start, '%d-%m-%Y').date()\n end_date = dt.strptime(end, '%d-%m-%Y').date()\n\n # Sanitize dates\n if end_date < start_date:\n # Return empty dataframe\n return pd.DataFrame()\n\n # Fetch one week before and after the requested start and end date\n start_date_fetch = start_date - timedelta(weeks=1)\n end_date_fetch = end_date + timedelta(weeks=1)\n df = get_nse_history_from_cache(symbol, start_date_fetch.strftime('%d-%m-%Y'), end_date_fetch.strftime('%d-%m-%Y'))\n\n # Check if dataframe is empty\n if df.empty:\n fetch_from_nse = True\n from_date = start_date\n else:\n # Check if dataframe is incomplete\n # 1) We should get at least one entry which has date more than end_date\n # Otherwise, check the last date date and fetch from there.\n last_date_str = df.iloc[-1].Date\n last_date = dt.strptime(last_date_str, '%Y-%m-%d').date()\n\n if last_date < end_date:\n fetch_from_nse = True\n from_date = last_date + timedelta(days=1)\n logging.info(\"Incomplete data in cache. Need to fetch from NSE\")\n\n # 2) The first frame date should be equal to the start date\n first_date_str = df.iloc[0].Date\n first_date = dt.strptime(first_date_str, '%Y-%m-%d').date()\n if first_date > start_date:\n fetch_from_nse = True\n from_date = start_date\n\n if fetch_from_nse is True:\n # Fetch data from NSE. 
We fetch a minimum of 7 months of data in one query.\n to_date = from_date + timedelta(weeks=30)\n\n if end_date > to_date:\n to_date = end_date\n\n if to_date > date.today():\n to_date = date.today()\n\n if from_date == date.today():\n # If today is a weekend, return.\n weekno = dt.today().weekday()\n # Monday = 0, Friday = 4\n if weekno > 4:\n return df\n\n\n logging.info(\"Fetch data from NSE for %s from %s to %s\" % (symbol, from_date.strftime(\"%Y-%m-%d\"), to_date.strftime(\"%Y-%m-%d\")))\n\n try:\n data_df = get_history(symbol=symbol, start=from_date, end=to_date)\n except Exception as e:\n logging.error(\"Failed to download data for symbol {}: {}\".format(symbol, str(e)))\n sys.exit(0)\n\n try:\n db_instance = NSEDB()\n data_df.to_sql('NSECACHE', db_instance.conn, if_exists='append', index = True)\n except Exception as e:\n logging.error(\"{}\".format(str(e)))\n\n # We have updated the cache. Now loop over again to get the data from cache.\n df = get_nse_history_from_cache(symbol, start, end_date_fetch.strftime('%d-%m-%Y'))\n\n if not df.empty:\n df.set_index('Date', inplace=True)\n df.index = pd.to_datetime(df.index)\n # Prune the dataframe to fit between start date and end date.\n # Does not yet work: mask = (df.index >= start_date) & (df.index <= end_date)\n # df = df.loc[mask]\n # df = df.loc[start_date.strftime('%Y-%m-%d'): (end_date + timedelta(days=1)).strftime('%Y-%m-%d')]\n df = df.loc[start_date.strftime('%Y-%m-%d'): end_date.strftime('%Y-%m-%d')]\n\n\n return df", "def __get_all_data(self,tickr):\n self.__csvurl=f\"https://query1.finance.yahoo.com/v7/finance/download/{tickr}?period1=1092873600&period2={int(datetime.now().timestamp())}&interval=1d&events=history&includeAdjustedClose=true\"\n s=get_historic_data(self.__csvurl)\n\n \"\"\"you should not be able to access dataframe from outside the class\"\"\"\n df=pd.read_csv(io.StringIO(s.decode('utf-8')))\n df=df.dropna()\n df_columns=['Date','High','Low','Close','Adj Close']\n\n if not set(df_columns).issubset(df.columns):\n raise ValueError(f\"One or more columns are missing {df_columns}\")\n\n if len(df.index)<5:\n raise ValueError(f\"Cannot calculate EMA 5\")\n\n if len(df.index)<20:\n raise ValueError(f\"Cannot calculate SMA 20\")\n\n \"\"\"set date as index (required for filtering,sorting,grouping etc etc\"\"\"\n df['Date'] = pd.to_datetime(df['Date'], format = '%Y-%m-%d')\n\n df.set_index(['Date'], inplace=True)\n\n\n return df", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. 
Symbol'])\n counter += 1\n\n return info, symbols", "def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data", "def get_back_dataframe(self, end_date=None, stocks=None):\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if stocks is None:\n stocks = self.stocks\n\n info = {}\n for stock in stocks:\n info[stock] = self.stock_data[stock].to_stock_dataframe_range(start_date=None, end_date=end_date)\n\n return info", "def get_data_extended(self, inception_date, interval):\n instrument = self.instrumentLookup()\n from_date = dt.datetime.strptime(inception_date, \"%Y-%m-%d\")\n to_date = dt.date.today()\n data = pd.DataFrame(columns=[\"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n while True:\n if from_date.date() >= (dt.date.today() - dt.timedelta(100)):\n data = data.append(\n pd.DataFrame(\n self.kite.historical_data(\n instrument, from_date, dt.date.today(), interval\n )\n ),\n ignore_index=True,\n )\n break\n else:\n to_date = from_date + dt.timedelta(100)\n data = data.append(\n pd.DataFrame(\n self.kite.historical_data(\n instrument, from_date, to_date, interval\n )\n ),\n ignore_index=True,\n )\n from_date = to_date\n data.set_index(\"date\", inplace=True)\n self.data_df = data", "def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock", "def get_stock(symbol, start, end):\n df = pdr.DataReader(symbol, 'yahoo', start, end)\n df = df.sort_index(axis=0)\n return df", "def get_stock_daily(id: int, db: Session = Depends(get_db)):\n return crud.get_stock(id, db)", "def get_market_inf_more_data(start=None, end=None, index=None, retry_count=3, pause=0.001):\n # data to be sent to post request\n data = {'startDate': start,\n 'endDate': end,\n 'searchRecentMarket': 'Search Recent Market'}\n\n for _ in range(retry_count):\n time.sleep(pause)\n try:\n r = requests.post(\n url=vs.DSE_URL+vs.DSE_MARKET_INF_MORE_URL, data=data)\n except Exception as e:\n print(e)\n else:\n #soup = BeautifulSoup(r.text, 'html.parser')\n soup = BeautifulSoup(r.content, 'html5lib')\n\n quotes = [] # a list to store quotes\n\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white text-center'})\n\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'Date': cols[0].text.strip().replace(\",\", \"\"),\n 'Total Trade': int(cols[1].text.strip().replace(\",\", \"\")),\n 'Total Volume': int(cols[2].text.strip().replace(\",\", \"\")),\n 'Total Value in Taka(mn)': float(cols[3].text.strip().replace(\",\", \"\")),\n 'Total Market Cap. 
in Taka(mn)': float(cols[4].text.strip().replace(\",\", \"\")),\n 'DSEX Index': float(cols[5].text.strip().replace(\",\", \"\")),\n 'DSES Index': float(cols[6].text.strip().replace(\",\", \"\")),\n 'DS30 Index': float(cols[7].text.strip().replace(\",\", \"\")),\n 'DGEN Index': float(cols[8].text.strip().replace(\"-\", \"0\"))\n })\n df = pd.DataFrame(quotes)\n if 'date' in df.columns:\n if (index == 'date'):\n df = df.set_index('date')\n df = df.sort_index(ascending=True)\n df = df.sort_index(ascending=True)\n else:\n print('No data found')\n return df", "def get_stock_historical_data(symbol, config):\n start_date_dt = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n end_date_dt = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n\n filename = '{}.csv'.format(symbol)\n dir_path = DATA_DIR / 'nasdaq_historical' / '{}_{}'.format(config['start_date'], config['end_date'])\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n full_path = dir_path / filename\n if os.path.exists(full_path):\n df = pd.read_csv(full_path, header=0, index_col='Date', parse_dates=True)\n else:\n df = web.get_data_yahoo(symbol, config['start_date'], config['end_date'])\n if df.index[0].date() == start_date_dt and df.index[-1].date() == end_date_dt:\n df.to_csv(full_path)\n\n df.attrs.update(config)\n df.attrs['stock_symbol'] = symbol\n df.attrs['start_date_dt'] = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n df.attrs['end_date_dt'] = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n df.attrs['date_dir'] = '{}_{}'.format(config['start_date'], config['end_date']) # for building paths\n return df", "def get_historic_data(end_date = datetime.now(), \n start_date = datetime.now() + timedelta(-365),\n ticker=[],\n close_only=True):\n #checks if the parameters provided through \"ticker\" is not an empty list\n #if it is, the function won't go forward after this point. returns explanatory message.\n if ticker == []:\n return \"Empty list of tickers\"\n \n #if a string is provided as \"ticker\" parameter, then it splits the string by \n #spaces and store the outcome in a list.\n elif type(ticker) is str:\n ticker = ticker.split(\" \")\n \n iex_token = os.getenv(\"IEX_TOKEN\")#not necessary anymore.\n if type(iex_token) == str: print(\"IEX Key found successfully ...getting data\")\n else: return \"Error: IEX Key NOT found\"\n \n \n #Gets historical data with the parameters provided.\n #Gets only \"close\" and \"volume\" value for efficiency.\n prices = get_historical_data(ticker, start_date, end_date,\n output_format='pandas', \n token=iex_token, \n close_only=close_only\n )\n \n #If only one ticker is provided, then it adds another indexing level to the column\n #with the ticker. This is done for two reasons: 1) To visualize the ticker downloaded \n #as a confirmation that I am working with correct data. 
2) To mimic the format of the\n #dataframe obtained when getting 2 or more tickers data (2-level column indexing).\n if len(ticker) == 1:\n new_columns = pd.MultiIndex.from_product([ [ticker[0]],prices.columns ] )\n prices.columns = new_columns\n \n return prices", "def getHistoricalData(stockName, startDate):\n conn = r.connect(db = db.DB)\n stockName = stockName.upper()\n startDate = dateToString(startDate)\n endDate = dateToString(datetime.datetime.now())\n\n if not stockName in db.STOCK_MAP.keys():\n return dict(\n error = 1,\n message = \"The info you want is not what I can give\"\n )\n\n stock = yf.StockInfo(stockName + db.IN_LONDON)\n cachedData = r.table(db.HISTORICAL_TABLE).get(stockName).run(conn)\n infoDict = dict()\n\n if cachedData == None:\n print \"\\n-- DB -- \" + stockName + \" == Inserting New Information ==\\n\"\n histList = stock.historical_prices(startDate, endDate)\n infoDict[\"history_list\"] = createHistoryDictList(histList)\n infoDict[\"index\"] = stockName\n infoDict[\"name\"] = db.STOCK_MAP[stockName]\n infoDict[\"timestamp\"] = getTime()\n r.table(db.HISTORICAL_TABLE).insert(infoDict).run(conn)\n else:\n elapsedTime = (\n getTime() -\n cachedData[\"timestamp\"]\n )\n if elapsedTime > db.HISTORICAL_INTERVAL:\n print \"\\n-- DB -- \" + stockName + \" == Updating Database ==\\n\"\n histList = stock.historical_prices(startDate, endDate)\n infoDict[\"history_list\"] = createHistoryDictList(histList)\n infoDict[\"index\"] = stockName\n infoDict[\"timestamp\"] = getTime()\n r.table(db.HISTORICAL_TABLE).get(stockName).update(\n infoDict\n ).run(conn)\n else:\n print \"\\n-- DB -- \" + stockName + \" == Using Cached Data ==\\n\"\n infoDict = cachedData\n\n infoDict[\"name\"] = db.STOCK_MAP[stockName]\n return infoDict", "def currency_prices_last_n_days(currency = 'ETH', to = 'EUR', days = 14):\n\n currencies = 'fsym={0}&tsym={1}'.format(currency, to)\n days = 'limit={0}'.format(days)\n\n req = requests.get( 'https://min-api.cryptocompare.com/data/histoday?'\n + currencies\n + '&'\n + days\n + '&aggregate=1&e=CCCAGG')\n\n result = req.json()\n\n list = [float(day['close']) for day in result['Data']]\n\n return list", "def get_data(ticker, interval, start_date, end_date):\r\n # Display indication\r\n print('[INFO] {} - Retrieving {}_{} historical data'.format(get_now(), ticker, interval))\r\n # Download ticker's ohlcv\r\n ohlcv = yf.download(tickers=ticker, start=start_date, end=end_date, interval=interval)\r\n # Modify dataframe\r\n ohlcv.drop(columns=['Adj Close'], inplace=True)\r\n ohlcv.sort_index(axis=0, ascending=False, inplace=True)\r\n ohlcv.reset_index(inplace=True)\r\n if \"Datetime\" in ohlcv.columns:\r\n ohlcv['Datetime'] = ohlcv['Datetime'].astype(str).str[:-9]\r\n return ohlcv" ]
[ "0.6434268", "0.6245548", "0.62428504", "0.621487", "0.6203779", "0.6103657", "0.6098898", "0.6059008", "0.6043774", "0.6032302", "0.6018421", "0.5985612", "0.596977", "0.5917684", "0.5898063", "0.58806944", "0.5870792", "0.58161986", "0.57779366", "0.5773703", "0.5767346", "0.57607675", "0.575513", "0.573915", "0.5734364", "0.5732744", "0.57045275", "0.56764036", "0.5669089", "0.56649965" ]
0.7667367
0
With about 8k stocks and about 2s per stock download, this would take forever. Don't use.
def download_all_stocks():
    stocks = get_stocklist()
    dfs = {}
    for i, r in stocks.iterrows():
        start = time.time()
        s = r['Ticker']
        stockfile = '../stockdata/' + s + '.csv.gz'
        print('downloading', s)
        stock = quandl.get('EOD/' + s)
        stock.to_csv(stockfile, compression='gzip')
        dfs[s] = stock
        print('took', time.time() - start, 's')
    return dfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_all():\r\n f = open('stock_symbols.txt', 'r')\r\n fout = open('../data/stocks_read.txt', 'w')\r\n count_max = 500\r\n count = 0\r\n for stock_symbol in f:\r\n stock_symbol = stock_symbol.strip()\r\n try:\r\n stock_download(stock_symbol)\r\n fout.write(stock_symbol + '\\n')\r\n except:\r\n print(\"was not able to read file \", stock_symbol)\r\n count = count + 1\r\n if count >= count_max:\r\n break\r\n f.close()\r\n fout.close", "def download_stocks(stocklist=STOCKLIST, fresh=False):\n # load stocklist\n with open(stocklist) as f:\n stocks = f.read().strip('\\n').split('\\n')\n\n dfs = {}\n for s in stocks:\n print(s)\n stockfile = '../stockdata/' + s + '.csv.gz'\n if fresh or not os.path.exists(stockfile):\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n continue\n\n else:\n stock = pd.read_csv(stockfile, index_col=0)\n stock.index = pd.to_datetime(stock.index)\n timedelta_step = 1\n if HOUR > 2 and WEEKDAY not in [5, 6]: # for mtn time\n timedelta_step = 0\n elif WEEKDAY == 0: # it's monday\n timedelta_step = 3 # can be up to last friday\n elif WEEKDAY in [5, 6]: # if a weekend, last data is from friday\n timedelta_step = WEEKDAY - 4\n print('date gap:', TODAY.date() - stock.iloc[-2:].index[-1].date())\n print('step, timedelta:', timedelta_step, datetime.timedelta(timedelta_step))\n if (TODAY.date() - stock.iloc[-2:].index[-1].date()) <= datetime.timedelta(timedelta_step):\n dfs[s] = stock\n print('latest date close enough to up-to-date:')\n print(stock.iloc[-2:].index[-1].date())\n print('not downloading')\n print('')\n continue\n else:\n print('latest date is')\n print(stock.iloc[-2:].index[-1].date())\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n\n return dfs", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. 
Symbol'])\n counter += 1\n\n return info, symbols", "async def stocks(self, ctx):\n\t\tpass", "def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values", "def download(output_path=None):\n base_url = r'http://www.stockpup.com/data/'\n\n # we first extract the list of stocks available on the website stockpup.com\n # At the time of writing, there is a Download section on the right side of the home page.\n # we will grab the list of stocks there\n\n response = urllib2.urlopen(base_url)\n\n list_of_stock_symbol = []\n list_duplicates = []\n\n pattern = '\\s+title=\\\"fundamental_data_excel_(\\w+)\\.csv\\\">'\n\n for line in response.readlines():\n if line.find('csv') != -1:\n # print(line)\n res = re.findall(pattern, line)\n if res and res[0] in list_of_stock_symbol:\n print('{} already exist. This file will not be download and the existing one will be removed. 
Please download manually.'.format(res[0]))\n list_duplicates.append(res[0])\n continue\n\n res and list_of_stock_symbol.append(res[0])\n\n\n # Here we remove the duplicates in the list of symbol\n # For those symbols that have duplicates, we need to create the list manually.\n list_of_stock_symbol = [x for x in list_of_stock_symbol if not x in list_duplicates]\n\n # Now create the list manually for the symbols that have duplicates.\n # The exact filename can be found via \"inspect element\"\n print(list_duplicates)\n\n # For now we just ignore these stocks. It is not clear what is going on with these stocks. There may be some\n # mislabeling here.\n pass\n\n # download the csv files\n # the csv files are load into pd.DataFrame. For each csv file, we add a column indicating the symbol of the stock\n list_df_stock = []\n list_failure = []\n\n suffix = '_quarterly_financial_data.csv'\n\n for item in list_of_stock_symbol:\n filename = ''.join([item,suffix])\n target_url = urlparse.urljoin(base_url, filename)\n try:\n df = pd.read_csv(target_url)\n df['symbol'] = item\n list_df_stock.append(df)\n except Exception as e:\n print(\"Failed to download file for {}\".format(item))\n print('Error message:', e)\n list_failure.append(item)\n\n\n df_out = pd.concat(list_df_stock, axis=0)\n\n if output_path is not None:\n df_out.to_csv(output_path, index=False)\n return df_out", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except Exception, e:\n print \"Could not download %s\" % symbol\n print e", "def get_all_binance_modified(symbol, kline_size, save=True, client=Client()):\n\n filename = 'history/%s-%s-data.csv' % (symbol, kline_size)\n if os.path.isfile(filename):\n data_df = pd.read_csv(filename)\n else:\n data_df = pd.DataFrame()\n oldest_point, newest_point = minutes_of_new_data(symbol, kline_size, data_df, source=\"binance\", client=client)\n oldest_point = datetime.strptime('23 Sep 2021', '%d %b %Y')\n delta_min = (newest_point - oldest_point).total_seconds() / 60\n available_data = math.ceil(delta_min / binsizes[kline_size])\n print(oldest_point)\n if oldest_point == datetime.strptime('1 Jan 2017', '%d %b %Y'):\n print('Downloading all available %s data for %s. Be patient..!' % (kline_size, symbol))\n else:\n print('Downloading %d minutes of new data available for %s, i.e. %d instances of %s data.' 
% (\n delta_min, symbol, available_data, kline_size))\n klines = client.get_historical_klines(symbol, kline_size, oldest_point.strftime(\"%d %b %Y %H:%M:%S\"),\n newest_point.strftime(\"%d %b %Y %H:%M:%S\"))\n data = pd.DataFrame(klines,\n columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av',\n 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])\n data['timestamp'] = pd.to_datetime(data['timestamp'], unit='ms')\n if len(data_df) > 0:\n temp_df = pd.DataFrame(data)\n data_df = data_df.append(temp_df)\n else:\n data_df = data\n data_df.set_index('timestamp', inplace=True)\n data_df = data_df[~data_df.index.duplicated(keep='last')]\n if save and os.path.exists('./history'): data_df.to_csv(filename)\n print('All caught up..!')\n data_df.index = pd.to_datetime(data_df.index, utc=True)\n data_df = data_df[~data_df.index.duplicated(keep='last')]\n return data_df.astype(float)", "def download_intraday_extended(conn, logger, slice='year1month1'):\n # 下载地址\n url_pattern = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol={}&interval=5min&slice={}&adjusted=true&apikey=\" + api_key\n Symbol = 'UVXY'\n path_root = 'stocks/data/'\n logger.info(f'Start downloading slice {slice}')\n # 判断curStocks表是否为空,否,则遍历curStocks,是,则将Stocks表中的所有股票代码插入curStocks表,遍历Stocks\n # 判断curStocks表是否为空\n cursor0 = conn.cursor()\n cursor0.execute('select count(*) from curStocks')\n result0 = cursor0.fetchall()\n count = int(result0[0][0])\n cursor0.close()\n if count == 0:\n # 遍历Stocks表\n cursor1 = conn.cursor()\n cursor1.execute('select count(*) from Stocks')\n result1 = cursor1.fetchall()\n count = int(result1[0][0])\n cursor1.execute('insert into curStocks (Symbol) select Symbol from Stocks')\n cursor1.close()\n conn.commit()\n\n while count > 0:\n # 遍历curStocks表\n cursor1 = conn.cursor()\n cursor1.execute('SELECT Symbol FROM curStocks')\n result = cursor1.fetchall()\n cursor1.close()\n\n for line in result:\n Symbol = line[0].strip()\n logger.info(f'Current stock code: {Symbol}')\n\n # 下载地址url\n url = url_pattern.format(Symbol, slice)\n logger.debug(url)\n\n try:\n # 把下载地址发送给requests模块\n f = requests.get(url, timeout=10) # 设置超时\n\n # 下载文件\n path = f'{path_root}{Symbol}_{slice}.csv'\n logger.debug(f'File saved to: {path}')\n with open(path, \"wb\") as code:\n code.write(f.content)\n\n except Exception as e:\n logger.debug(e)\n logger.error(Symbol + '下载失败')\n # time.sleep(random.randint(30, 60))\n continue\n # logger.debug(curDate)\n # 取出上次的数据日期\n cursor0 = conn.cursor()\n cursor0.execute('select max(timestamp) from IntradayQuotes where Symbol = ?;', Symbol)\n result0 = cursor0.fetchall()\n cursor0.close()\n oldDate = result0[0][0] if result0[0][0] else None\n logger.info(f'Last record in database: {oldDate}')\n # 写入数据库\n with open(path, 'r') as csvfile:\n next(csvfile)\n cursor2 = conn.cursor()\n read = csv.reader(csvfile) # 逐行读取csv文件,并写入\n for i, one_line in enumerate(read):\n newDate = datetime.datetime.strptime(one_line[0], '%Y-%m-%d %H:%M:%S')\n if oldDate and newDate <= oldDate:\n logger.info(f'Imported {i} new records.')\n break\n cursor2.execute(\"INSERT INTO IntradayQuotes VALUES (?,?,?,?,?,?,?)\", (\n Symbol, one_line[0], one_line[1], one_line[2], one_line[3], one_line[4], one_line[5]))\n else:\n logger.info(f'Imported {i + 1} new records.')\n cursor2.close()\n\n # 更新curStocks表\n cursor3 = conn.cursor()\n cursor3.execute(\"delete from curStocks where Symbol = ?\", Symbol)\n cursor3.close()\n\n conn.commit()\n wait_time = random.randint(10, 30)\n 
logger.debug(f'Waiting for {wait_time} seconds to continue...')\n time.sleep(wait_time)\n\n # 检查curStocks是否还有未下载的股票\n cursor4 = conn.cursor()\n cursor4.execute('select count(*) from curStocks')\n result4 = cursor4.fetchall()\n count = int(result4[0][0])\n cursor4.close()\n if count > 0:\n logger.info('本轮下载失败数量:' + str(count))\n\n logger.info(f'Slice {slice} has been downloaded.')", "def old_start_to_scrape_stocks():\n # the way it works is: 20 stocks are displayed per page, and the r= paramater in the url tells where to start listing with the stocks\n res = req.get(stocks_url.format('1'), headers={'user-agent': ua.random})\n soup = bs(res.content, 'lxml')\n # get last page number to get pages that need to be iterated through\n last_page_num = int(soup.findAll('a', {'class': 'screener-pages'})[-1].text)\n # the last page should be the (last page number - 1) * 20 + 1\n last_r = (last_page_num - 1) * 20 + 1 + 1 # add another one for range to work\n for p in range(21, last_r, 20):\n pass", "def ShyRetrieve(symbol, minDate=None, downloadMissing=None):\n import os\n for file in os.listdir(\"Cache\"):\n if file == f\"{symbol}.csv\":\n print(f\"Parsing {symbol} from local drive.\")\n stock = Stock.ParseCSV(\"Cache/\" + file)\n if len(stock.history) != 0 and (minDate == None or stock.history[-1].date >= minDate):\n return stock\n\n def okayToDownload():\n if minDate != None:\n return True\n if downloadMissing != None:\n return downloadMissing\n print(f\"{symbol} not found in local drive. Okay to download from yfinance? (y/n)\")\n response = input()\n if response.lower() == \"y\":\n return True\n elif response.lower() == \"n\":\n return False\n else:\n print(\"Please respond \\\"y\\\" or \\\"n\\\".\")\n return okayToDownload()\n\n if okayToDownload():\n #stock = Stock.FromYfinance(symbol=symbol)\n stock = Stock(symbol=symbol)\n stock.Update()\n stock.SaveToCSV()\n print(f\"{stock.name} downloaded from yfinance API.\")\n return stock\n else:\n return Stock(symbol=symbol)", "def test_find_stock_items(self):\n pass", "def get_stock_data():\n if not os.path.exists('./catalog/stock_data'):\n os.mkdir('./catalog/stock_data')\n \n inventory_data = {}\n inventory_file = './catalog/stock_data/inventory-bro.txt'\n \n download_data = True\n if os.path.exists(inventory_file):\n # Check that inventory file is no more than 1 day old\n filestat = os.stat(inventory_file)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n # Get inventory data from ftp site\n from ftplib import FTP_TLS\n print 'Downloading inventory-bro.txt ....'\n ftps = FTP_TLS('ftp.appareldownload.com')\n ftps.login('Br0d3r', 'Br0d3r2oll')\n ftps.prot_p()\n #ftps.retrlines('LIST')\n ftps.retrbinary('RETR inventory-bro.txt', open(inventory_file, 'wb').write)\n ftps.quit()\n \n print \"Parse inventory-bro.txt ... 
\"\n first_row = None\n for row in csv.reader(open(inventory_file, 'rb')):\n itemRef = row[4].lower()\n if itemRef == 'style number':\n # save first row to be used as column header\n first_row = row\n continue\n \n source_attribs = [{'attribute_type': 'source', 'attribute_value': 'broderbros'}]\n \n inventory_data.setdefault(itemRef, [])\n \n color = row[8].lower()\n size = row[10].lower()\n \n # Warehouses starts at column 13\n for i in range(13, len(first_row)):\n wh_name = first_row[i]\n options = [\n {'option_type': 'color', 'option_value': color, 'attributes': []},\n {'option_type': 'size', 'option_value': size, 'attributes': []},\n {'option_type': 'warehouse', 'option_value': wh_name, 'attributes': source_attribs, 'shared': True},\n {'option_type': 'vendor', 'option_value': 'broderbros', 'attributes': source_attribs, 'shared': True},\n ]\n inventory_data[itemRef].append({'options': options, 'inventory': row[i]})\n \n # Pricing data\n pricing_tarfile = \"./catalog/stock_data/bro-AllStyles_R06.tar.gz\"\n download_data = True\n if os.path.exists(pricing_tarfile):\n # Check that file is no more than 1 day old\n filestat = os.stat(pricing_tarfile)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n print 'Downloading items.csv for price data ....'\n br = utils.create_browser(1, 2)\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n try:\n tar_url = \"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, pricing_tarfile)\n except:\n print \"Error when downloading pricing file\"\n return None\n \n try:\n tar = tarfile.open(pricing_tarfile)\n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall('catalog/stock_data/bro-AllStyles_R06')\n tar.close()\n except:\n print \"Error when extracting items.csv\"\n return None\n \n f_object = open('./catalog/stock_data/bro-AllStyles_R06/items_R06.csv', 'rb')\n #~ f_object = open('items_R06.csv', 'rb')\n \n print \"Parse items_R06.csv ... 
\"\n for row in csv.reader(f_object):\n itemRef = row[7].lower()\n if itemRef == 'style code':\n continue\n \n size = row[8].lower()\n color = row[11].lower()\n price = row[18]\n \n item_data = inventory_data.get(itemRef)\n if not item_data:\n continue\n # Find data with same size and color\n for var_dict in item_data:\n options = var_dict['options']\n opt_dict = {}\n for opt in options:\n opt_type = opt['option_type']\n opt_value = opt['option_value']\n if opt_type == 'size':\n opt_dict['size'] = opt_value\n elif opt_type == 'color':\n opt_dict['color'] = opt_value\n if opt_dict['size'] == size and opt_dict['color'] == color:\n var_dict['price'] = [{'price_type': 'retail_price', 'price': price}]\n \n f_object.close()\n \n try:\n shutil.rmtree(\"./catalog/stock_data/bro-AllStyles_R06\")\n #~ os.remove(\"./catalog/stock_data/bro-AllStyles_R06.tar.gz\")\n except:\n pass\n \n return inventory_data", "def get_put_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n print(url)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"table\")[1].findAll(\"td\")\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def getStockData():\n pass", "def stock_processor(id, price, title, remaining, totalPackCount, preorder, start, proxy, headers):\n\n r = request_pack_stock(proxy, headers)\n packs = r['data']['searchPackListings']['data']['searchSummary']['data']['data']\n\n for pack in packs:\n item = [pack['id'], pack['title'], pack['price'], pack['remaining'], pack['totalPackCount'], pack['preorder']]\n #print(f'\\n\\nITEM:{item}\\n\\n')\n if pack['remaining'] == remaining: #change back to !=\n # Checks if it already exists in our instock\n if checker(item):\n pass\n else:\n # Add to instock dict\n INSTOCK.append(item)\n print(f'\\n\\nINSTOCK:{INSTOCK}\\n\\n')\n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n print(item)\n discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if checker(item):\n INSTOCK.remove(item)", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def update_all_stocks(return_headers=False, update_small_file=False):\n # 7-13-2017: 28788363 rows in full df\n zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/download?api_key=' + \\\n Q_KEY + '&download_type=partial'\n r = req.get(zip_file_url)\n z = 
zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path='../stockdata/')\n if return_headers:\n df = pd.read_csv('../stockdata/' + z.filelist[0].filename, parse_dates=True)\n df.set_index('Date', inplace=True)\n new_c = [re.sub('.\\s', '_', c) for c in df.columns]\n return new_c\n\n df = pd.read_csv('../stockdata/' + z.filelist[0].filename)\n # it won't parse dates when it reads...\n df['Date'] = pd.to_datetime(df['Date'])\n df.set_index('Date', inplace=True)\n # fix problem with . and _ in Adjusted cols\n new_c = [re.sub('.\\s', '_', c) for c in df.columns]\n df.columns = new_c\n full_df = pd.read_csv('../stockdata/all_stocks.csv.gzip',\n parse_dates=True,\n compression='gzip',\n index_col=0)\n if (full_df.columns == df.columns).mean() != 1:\n print('WARNING! Columns in the full df do not match the updated df columns.')\n print('full_df cols:')\n print(full_df.columns)\n print('')\n print('update df cols:')\n print(df.columns)\n print('')\n print('aborting and returning current full_df')\n return full_df\n\n if df.index.max() > full_df.index.max():\n df.to_csv('../stockdata/all_stocks.csv.gzip', mode='a', compression='gzip')\n dtypes = ['object'] + ['float64'] * 10\n full_df = pd.read_csv('../stockdata/all_stocks.csv.gzip',\n parse_dates=True,\n compression='gzip',\n index_col=0,\n dtype=dtypes)\n\n os.remove('../stockdata/' + z.filelist[0].filename)\n\n return full_df", "def download_files(df, workdir):\n size = 0\n \n for index, row in df.iterrows():\n filename = os.path.join(workdir, 'song_' + str(index) + '.mp3')\n\n url = row['Download URLs'] \n\n if index%10==0:\n print(index, \"Current Time =\", datetime.now().strftime(\"%H:%M:%S\"))\n \n now = datetime.now()\n try:\n size += download_file_(url, filename)\n except:\n continue\n \n return(size)", "def stock_download(stock_symbol, day=None, month=None, year=None):\r\n\r\n page = stock_url(stock_symbol, day, month, year)\r\n response = request.urlopen(page)\r\n csv = response.read()\r\n\r\n # Save the string to a file\r\n csvstr = str(csv).strip(\"b'\")\r\n out_file = \"../data/\"\r\n out_file = ''.join([out_file, stock_symbol, \".csv\"])\r\n\r\n lines = csvstr.split(\"\\\\n\")\r\n# #f = open(\"../data/historical.csv\", \"w\")\r\n print(out_file)\r\n f = open(out_file, \"w\")\r\n f.write(lines[0] + '\\n') # write out the header\r\n #next(lines)\r\n for line in lines[1:]:\r\n elements = line.split(',')\r\n if len(elements) > 1 and float(elements[6]):\r\n f.write(line + \"\\n\")\r\n f.close()", "def stock_market(no_profiles: int) -> tuple:\n all_companies = []\n Stocks = namedtuple(\"Stocks\", 'name symbol open high close company_weight')\n MkValue_ = random.uniform(1000, 50000, 100)\n wts_ = random.uniform(0, 1, 100)\n wts_ = wts_/sum(wts_)\n\n for _ in range(100):\n name = fake.company()\n open_ = round(MkValue_[_]*wts_[_],2)\n close = round(open_ * random.uniform(0.7, 1.15), 2)\n high = round(open_ * random.uniform(0.85, 1.15), 2)\n if high < open_:\n high = open_\n if high < close:\n high = close\n\n all_companies.append(\n Stocks(name=name, symbol=symbol(name), open=open_, high=round(high, 2), close=round(close, 2), company_weight=round(wts_[_], 4)))\n\n stock_index = round(\n sum(x.open * x.company_weight for x in all_companies), 4)\n highest_for_day = round(\n sum(x.high * x.company_weight for x in all_companies), 2)\n lowest_close_for_day = round(\n sum(x.close * x.company_weight for x in all_companies), 2)\n\n # print(f\"\\n------------------------------------Top 100 listed companies on Fake Stock 
Exchange------------------------------------\")\n # [print(x) for x in sorted(all_companies, key=lambda x:x.symbol)]\n # print(f\"\\n--------------Main details on {date.today()}--------------\")\n # print(f\"\\nStart of the day: {stock_index}\")\n # print(f\"Highest for the day: {highest_for_day}\")\n # print(f\"Lowest close for the day: {lowest_close_for_day}\")\n return sorted(all_companies, key=lambda x: x.symbol), stock_index, highest_for_day, lowest_close_for_day", "def get_com_data_fr_all_stocks(self):\n full_list = self.replace_special_characters_in_list(self.full_stocklist_to_retrieve)\n chunk_of_list = self.break_list_to_sub_list(self.full_stocklist_to_retrieve)\n \n self.temp_full_data_df = None\n for n in chunk_of_list:\n # print the progress\n sys.stdout.write('.')\n\n # set the small chunk of list\n self.set_target_stocks_list(n)\n self.get_com_data()\n\n # convert to dataframe\n self.com_data_allstock_df = pandas.DataFrame(self.com_data_allstock_list)\n self.com_data_allstock_df.rename(columns ={'symbol':'SYMBOL'}, inplace=True)\n \n print 'Done\\n'", "def track_price():\n r = requests.get('https://finance.yahoo.com/quote/EURPLN=X?p=EURPLN%3DX&.tsrc=fin-srch&guce_referrer'\n '=aHR0cHM6Ly9maW5hbmNlLnlhaG9vLmNvbS8_Z3VjZV9yZWZlcnJlcj1hSFIwY0hNNkx5OTNkM2N1WjI5d'\n 'loyeGxMbU52YlM4Jmd1Y2VfcmVmZXJyZXJfc2lnPUFRQUFBRG1vS3ROMkF5bzFpTDRpd29Td0Z4Z0NDTVN'\n 'XU3M0UkNoa2pBcGl2NmxobmxJcWRab0JIWUF6NVJuNHlZdkN1WTRBNEdwVTRfWjBZQ3JNM1RwX2ZMd05rej'\n 'g0TkVWdksyUzA3LVNmNXdndUJCUjhieG5sZEN4dGRCRmV6eEZfMnNQdEpQeXJ6UzREeV9WRUF4ZXNUMXNLYz'\n 'lnTm1pSlFCV3R6LVpLX0hvc2p5Jl9ndWNfY29uc2Vud'\n 'F9za2lwPTE1OTcwODc3MTg&guce_referrer_sig=AQAAAKzjjM2--Diw1M3gykrGHjIn9NdqSch_odxmo6xqtgD4pNo'\n 'anrEQBgPoZ9xkh8HPYFN1_9mpio4Fg2tEGa4GrsK69bHe4yN9LactTwdKEuBxazZPO751TNSeFH_lltkNoN1k7D6I978v'\n '1eXB9WaCp0NUgbRZRmbYEdoZmkmQvUq7&_guc_consent_skip=1597087949')\n if r.status_code != 200:\n raise ConnectionError\n else:\n soup = BeautifulSoup(r.text, 'html.parser')\n price_elem = soup.find('span', {\"class\": \"Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)\"})\n return float(price_elem.text)", "def get(yahoo_code,inicio,fin):\n #######################################\n #inicio=(aaaa,mm,dd), fin=(aaaa,mm,dd)#\n #######################################\n # connection parameters\n # ----------------------------------- #\n\n # http timeout\n timeout_secs = 5\n\n # retries\n num_retries = 4\n\n # url encoding\n yahoo_url = r'https://finance.yahoo.com/quote/{0}/history?p={0}'.format(yahoo_code)\n\n # init headers\n headers = dict()\n headers['Connection'] = 'keep-alive'\n headers['Upgrade-Insecure-Requests'] = '1'\n headers['User-Agent'] = r\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\"\n\n\n # manejo de conexion\n # ----------------------------------- #\n\n csv_data = None\n while num_retries>0:\n\n try:\n\n session = requests.Session()\n\n r = session.get(yahoo_url,headers=headers,timeout=timeout_secs)\n r.encoding = 'utf-8'\n html_text = r.text\n\n # get crumb\n pattern = r\"(\\\"CrumbStore\\\":{\\\"crumb\\\":\\\")([^\\\"]+)(\\\"})\"\n m = re.search(pattern, html_text)\n crumb = m.group(2).replace(\"\\\\u002F\",\"/\")\n\n # Obtener datos desde inicio=(aaaa,mm,dd) (UTC)\n start_time = calendar.timegm(datetime(inicio[0],inicio[1],inicio[2]).utctimetuple())\n# #hasta hoy: end_time = calendar.timegm(datetime.now().utctimetuple()) \n end_time = calendar.timegm(datetime(fin[0],fin[1],fin[2]).utctimetuple())\n\n # url para descargar datos\n data_url = 
r\"https://query1.finance.yahoo.com/v7/finance/download/{0}?period1={1}&period2={2}&interval=1d&events=history&crumb={3}\".format(yahoo_code, start_time, end_time, crumb)\n\n # bajar datos en formato csv\n r = session.get(data_url,headers=headers,timeout=timeout_secs)\n csv_data = csv.reader(r.content.decode().splitlines(),delimiter=',')\n\n except requests.exceptions.Timeout:\n\n wtext = 'Connection timeout, {0} reintentos restantes'.format(str(num_retries))\n\n # print or log\n print(wtext)\n\n except AttributeError:\n\n wtext = 'Error de migajas (crumb error), {0} reintentos restantes'.format(str(num_retries))\n\n # print or log\n print(wtext)\n\n except Exception:\n\n wtext = 'Error genérico, {1} intentos restantes'.format(str(num_retries))\n\n # print or log\n print(wtext)\n\n finally:\n\n if csv_data:\n wtext = 'Los datos para {0} se bajaron sin pedos'.format(yahoo_code)\n\n # print or log\n print(wtext)\n break\n\n else:\n num_retries -= 1\n\n # asset-data\n if csv_data:\n eod_data = []\n for ii,row in enumerate(csv_data):\n\n if ii>0 and not 'null' in row:\n\n eod_data.append({\n 'date': row[0],\n 'open': float(row[1]),\n 'high': float(row[2]),\n 'low': float(row[3]),\n 'close': float(row[4]),\n 'adj_close': float(row[5]),\n 'volume': int(row[6])\n })\n\n else:\n\n wtext = 'No se pudo descargar {0} :c'.format(yahoo_code)\n\n # print or log\n print(wtext)\n\n return eod_data", "def updateStockInfo(df, date):\n date = getLastBDay(date)\n\n for index, row in df.iterrows():\n try:\n stockData = data.DataReader(row['Symbol'],\n 'yahoo',\n date,\n date)\n floatNum = df.loc[df['Symbol']==row['Symbol'], ['Float']].values[0]\n\n df.at[index,'Last Sale'] = stockData['Close']\n df.at[index,'Market Cap'] = stockData['Close'] * float(floatNum) if floatNum else 0\n df.at[index, 'Has Data'] = 1\n except (RemoteDataError, KeyError):\n df.at[index, 'Has Data'] = 0\n print('No Data found for {0}'.format(index))\n return [df.to_dict()]", "def fetch_all_stocks(config):\n start_date_dt = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n end_date_dt = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n\n with open(DATA_DIR / 'nasdaq_screener_1619356287441.csv', 'r') as fp:\n reader = csv.reader(fp)\n next(reader) # skip header\n symbols = [row[0] for row in reader]\n\n dir_path = DATA_DIR / 'nasdaq_historical' / '{}_{}'.format(config['start_date'], config['end_date'])\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n for symbol in symbols:\n filename = '{}.csv'.format(symbol)\n full_path = dir_path / filename\n if not os.path.exists(full_path):\n try:\n df = web.get_data_yahoo(symbol, config['start_date'], config['end_date'])\n except:\n # print(traceback.format_exc())\n print('Could not load ' + symbol)\n else:\n if df.index[0].date() == start_date_dt and df.index[-1].date() == end_date_dt:\n print('Loaded: ' + symbol)\n df.to_csv(full_path)", "def scrape_descriptions_sync():\n # прочитать Symbols, for symbol in tqdm(symbols)\n # исользовать urllib get запросы на yahoo и полученное записывать в файл с помощью\n # добавить tqdm(symbols)\n\n myheader = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n }\n\n symbols = read_symbols()\n YAHOO_HTMLS.mkdir(parents=True, exist_ok=True)\n\n\n for symbol in tqdm(symbols):\n #Example myurl = \"https://finance.yahoo.com/quote/AAPL/profile?p=AAPL\"\n myurl = f'https://finance.yahoo.com/quote/{symbol}/profile?p={symbol}'\n\n try:\n 
req = request.Request(myurl, headers=myheader)\n response = request.urlopen(req)\n text = response.read()\n response.close()\n\n except Exception:\n print(\"Error occuried during web request!!\")\n print(sys.exc_info()[1])\n\n f = open(YAHOO_HTMLS / f'{symbol}.html', 'wb')\n f.write(text)\n f.close()", "def try3():\n path = '/Users/mayankkejriwal/datasets/eswc2016/'\n total = 10\n count = 1\n with gzip.open(path+'freebase-rdf-latest.gz', 'rb') as f:\n for line in f:\n print 'line : ',\n print line\n if count > total:\n break\n count += 1", "def download_stock_price_hist(\n\ttickers = [ 'AAPL' ],\n\tprice_column = 'Adj Close',\t\t\t\t\t\t\t\t# assume it's the Adjusted Close price that are interested\n\tstart = datetime.date( 2009, 12, 31 ),\t\t\t\t# assume start is guaranteed to be a weekday\n\tend = datetime.date( 2015, 12, 31 ),\n\tcsv_file = \"stock_price_test.csv\",\n):\n\t# Check validity of inputs\n\tif len( tickers ) <= 0:\n\t\tprint \"Tickers must not be empty\";\n\t\treturn False;\n\tif start > end:\n\t\tprint \"Start date \" + start.isoformat() + \" can't be later than End date \" + end.isoformat();\n\n\tdf = pd.DataFrame();\t\t\t# data frame to return\n\tfor _i in range( len(tickers) ):\n\t\tticker = tickers[_i];\n\t\tprint \"Index\" + str(_i) + \"\\t\" + \"Ticker: \" + ticker;\n\n\t\tstart_str = start.isoformat();\n\t\tend_str = end.isoformat();\n\t\thist = ystockquote.get_historical_prices( ticker, start_str, end_str );\t# dictionary with date string as the key\n\n\t\t# Get time series of stock prices (Don't sort before forming the Series!!!)\n\t\tdate_index = [];\n\t\tprice_data = [];\n\t\tfor key, val in hist.iteritems():\n\t\t\tdate_index.append( datetime.datetime.strptime( key, \"%Y-%m-%d\" ).date() );\n\t\t\tprice_data.append( float( val[ price_column ] ) )\n\n\t\tif min( date_index ) > start:\t\t\t\t\t\t\t\t# Pass if the no stock price is available on Start\n\t\t\tcontinue;\n\t\tstock_ts = pd.Series( price_data, date_index );\n\t\tstock_ts = stock_ts.sort_index();\n\n\t\t# Add current stock TS to the DataFrame\n\t\tdf[ticker] = stock_ts;\n\t\n\tdf.to_csv( csv_file, index_label='Date' );\n\treturn True;", "def _get_financials_by_chunk(self, args):\n (istart, iend) = args\n comp_index = self.components.index\n # download financials\n browser=webdriver.Chrome()\n for sym in comp_index[istart:iend]:\n print('Chunk %s-%s: downloading financial data for %s' %(comp_index[istart], comp_index[iend], sym))\n stock = Symbol(sym)\n if 'Exchange' in self.components.columns:\n exch = self.components['Exchange'][sym]\n if type(exch) == pd.Series:\n # unexpected duplicates, e.g. AMOV\n exch = exch.iloc[0]\n if type(exch) == str:\n stock.exch = exch\n stock.get_financials(browser=browser)\n stock.save_financial_data()\n browser.quit()\n return" ]
[ "0.63830006", "0.6112706", "0.6052205", "0.60002667", "0.5892348", "0.58545315", "0.58131224", "0.5802235", "0.5686907", "0.56537914", "0.5645122", "0.5633165", "0.5632974", "0.55785567", "0.5570575", "0.5567166", "0.5490028", "0.54742014", "0.5439857", "0.53706014", "0.53656757", "0.5364127", "0.535001", "0.5331644", "0.5324182", "0.5310267", "0.5304949", "0.5298617", "0.52819216", "0.5266414" ]
0.6356083
1
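The negative snippets in the record above all revolve around downloading end-of-day price data with retry and error handling. As a rough illustrative sketch of that retry pattern (not taken from any of the snippets; the function name fetch_csv_with_retries and the URL argument are made up for illustration, and only the widely used requests API is assumed):

import time
import requests

def fetch_csv_with_retries(url, retries=3, timeout=10):
    # Try the request up to `retries` times, pausing briefly between attempts.
    for attempt in range(1, retries + 1):
        try:
            resp = requests.get(url, timeout=timeout)
            resp.raise_for_status()
            return resp.text  # raw CSV payload on success
        except requests.exceptions.Timeout:
            print('Timeout, {} retries left'.format(retries - attempt))
        except requests.exceptions.RequestException as err:
            print('Request failed ({}), {} retries left'.format(err, retries - attempt))
        time.sleep(1)
    return None  # all attempts failed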
Downloads stock data and returns a dict of pandas dataframes. First checks if the data is up to date; if so, just loads the data.
def download_stocks(stocklist=STOCKLIST, fresh=False):
    # load stocklist
    with open(stocklist) as f:
        stocks = f.read().strip('\n').split('\n')

    dfs = {}
    for s in stocks:
        print(s)
        stockfile = '../stockdata/' + s + '.csv.gz'
        if fresh or not os.path.exists(stockfile):
            print('downloading fresh')
            stock = quandl.get('EOD/' + s)
            stock.to_csv(stockfile, compression='gzip')
            dfs[s] = stock
            continue
        else:
            stock = pd.read_csv(stockfile, index_col=0)
            stock.index = pd.to_datetime(stock.index)
            timedelta_step = 1
            if HOUR > 2 and WEEKDAY not in [5, 6]:  # for mtn time
                timedelta_step = 0
            elif WEEKDAY == 0:  # it's monday
                timedelta_step = 3  # can be up to last friday
            elif WEEKDAY in [5, 6]:  # if a weekend, last data is from friday
                timedelta_step = WEEKDAY - 4
            print('date gap:', TODAY.date() - stock.iloc[-2:].index[-1].date())
            print('step, timedelta:', timedelta_step, datetime.timedelta(timedelta_step))
            if (TODAY.date() - stock.iloc[-2:].index[-1].date()) <= datetime.timedelta(timedelta_step):
                dfs[s] = stock
                print('latest date close enough to up-to-date:')
                print(stock.iloc[-2:].index[-1].date())
                print('not downloading')
                print('')
                continue
            else:
                print('latest date is')
                print(stock.iloc[-2:].index[-1].date())
                print('downloading fresh')
                stock = quandl.get('EOD/' + s)
                stock.to_csv(stockfile, compression='gzip')
                dfs[s] = stock

    return dfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_all_stocks():\n stocks = get_stocklist()\n dfs = {}\n for i, r in stocks.iterrows():\n start = time.time()\n s = r['Ticker']\n stockfile = '../stockdata/' + s + '.csv.gz'\n print('downloading', s)\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n print('took', time.time() - start, 's')\n\n return dfs", "def get_dataframes(symbols=(\"sne\", \"goog\", \"tsla\"), source='yahoo', refresh=False):\n symbols = util.make_symbols(list(symbols))\n if refresh:\n symbols_to_refresh = symbols\n else:\n symbols_to_refresh = [sym for sym in symbols if not Equity.objects.filter(symbol=sym).exists()]\n source = source.lower().strip()\n if source in ('yahoo', 'google'):\n source += '_finance'\n if source[:3] == 'fed':\n source = 'federal_reserve_economic_data'\n ccpanda = ccp.ConcurrentPandas()\n # set the data source\n getattr(ccpanda, \"set_source_\" + source)()\n if symbols_to_refresh:\n # tell concurrent pandas which keys/symbols to retrieve\n ccpanda.insert_keys(symbols_to_refresh)\n # start concurrentpandas threads\n ccpanda.consume_keys_asynchronous_threads()\n # FIXME: is there a better/faster iterator to use like `ccpanda.output_map` attribute?\n pseudodict = ccpanda.return_map()\n else:\n pseudodict = {}\n table = {}\n for sym in symbols:\n e, created = None, False\n if not sym in symbols_to_refresh:\n e, created = Equity.objects.get_or_create(symbol=sym)\n if created or not e or not e.time_series or sym in symbols_to_refresh:\n e, created = Equity.objects.get_or_create(\n symbol=sym,\n name=sym, # FIXME: use data source to find equity name!\n time_series=pseudodict[sym].to_json(),\n )\n table[sym] = pd.io.json.read_json(path_or_buf=e.time_series, orient='columns', typ='frame', convert_dates=True)\n return table", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n if os.path.isfile(symbol_to_path(symbol)): \n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date', \n parse_dates = True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns = {'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': #drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n# else:\n# download_symbol(symbol) \n return df", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 
'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def get_stock_data_frame(time, stock):\n\n print(\"Getting\", time, \"stock data for\", stock)\n url = 'https://api.iextrading.com/1.0/stock/'+stock+'/chart/'+time\n req = requests.get(url)\n print(url)\n\n print(\"Parsing data.\")\n rjson = req.text\n\n rdata = json.loads(rjson)\n\n dates = []\n openprices = []\n highprices = []\n lowprices = []\n closeprices = []\n volumes = []\n\n for i in rdata:\n date = i['date']\n dates.append(date)\n openprices.append(float(i['open']))\n highprices.append(float(i['high']))\n lowprices.append(float(i['low']))\n closeprices.append(float(i['close']))\n volumes.append(float(i['volume']))\n\n index = pd.DatetimeIndex(dates, dtype='datetime64[ns]')\n _open = pd.Series(openprices, index=index)\n high = pd.Series(highprices, index=index)\n low = pd.Series(lowprices, index=index)\n close = pd.Series(closeprices, index=index)\n data_frame_data = {'Open' : _open, 'High' : high, 'Low' : low, 'Close' : close}\n\n return pd.DataFrame(data_frame_data)", "def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. 
ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices", "def _get_data(self):\n\n data = self.get_data()\n\n required_data = ['open','close','open_date','high','low']\n if not np.isin(required_data, data.columns).all():\n raise ImplementationError(f'''\n Data must contain columns: {required_data}\n ''')\n\n data = data.sort_values('open_date')\n data.index = data.open_date\n\n temp_dates = pd.unique(data.open_date)\n self.total_candles = len(temp_dates)\n self.start_date, self.end_date = min(temp_dates), max(temp_dates)\n\n # Divide df based on symbol, create DataEngine object, add to dict.\n data_dict = {}\n for symbol in self.symbols.symbol:\n try:\n data_dict[symbol] = DataEngine(data[data.symbol == symbol])\n except DiscontinuousError as err:\n print(f'There are missing dates in data for {symbol}')\n raise err\n except ValueError as err:\n print(f'No data for provided for symbol: {symbol}')\n self.symbols = self.symbols.drop(symbol)\n\n return data_dict", "def fetch_all_stocks(config):\n start_date_dt = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n end_date_dt = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n\n with open(DATA_DIR / 'nasdaq_screener_1619356287441.csv', 'r') as fp:\n reader = csv.reader(fp)\n next(reader) # skip header\n symbols = [row[0] for row in reader]\n\n dir_path = DATA_DIR / 'nasdaq_historical' / '{}_{}'.format(config['start_date'], config['end_date'])\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n for symbol in symbols:\n filename = '{}.csv'.format(symbol)\n full_path = dir_path / filename\n if not os.path.exists(full_path):\n try:\n df = web.get_data_yahoo(symbol, config['start_date'], config['end_date'])\n except:\n # print(traceback.format_exc())\n print('Could not load ' + symbol)\n else:\n if df.index[0].date() == start_date_dt and df.index[-1].date() == end_date_dt:\n print('Loaded: ' + symbol)\n df.to_csv(full_path)", "def get_data_for_ticker(ticker):\n logger.debug(f'processing get_data_for_ticker({ticker})')\n df_data = get_existing_data_for_ticker(ticker)\n start_date, end_date = get_ticker_start_and_end_dates(df_data)\n logger.debug(f'retrieving for {ticker} from {start_date} to {end_date}')\n df_new_data = pd.DataFrame()\n if start_date != end_date:\n df_new_data = ping_yahoo_for_ticker(ticker, start_date, end_date)\n if df_data.empty:\n df_data = df_new_data\n else:\n df_data = df_data.append(df_new_data)\n return df_data", "def load_old_value_dfs(latest_data_path: str) -> Dict[str, pd.DataFrame]:\n df_dict_old = {}\n latest_values_path = Path(latest_data_path) / \"values\"\n for file in latest_values_path.iterdir():\n file_name = file.name[:-11] # truncate the timestamp\n df_dict_old[file_name] = pd.read_parquet(file)\n\n logger.debug(f\"{len(list(df_dict_old.items()))} previous dataframes loaded.\")\n return df_dict_old", "def load_data(data_path):\n stock_news_df = pd.read_csv(data_path + 'news_reuters.csv', header=None,\n names=['tickers', 'company', 'date', 'headline', 'first_sent', 'priority'])\n\n with open(data_path + 'stockReturns.json') as f:\n stock_price = json.load(f)\n stock_price_df = pd.DataFrame(stock_price)\n\n return stock_news_df, stock_price_df", "def stock_data(ticker, start,today=date.today()):\n 
df= web.DataReader(ticker,'yahoo',start,today)\n return df", "def _load_pricehistory(symbol) -> Dict[date, Dict[str, float]]:\n with open(f\"tmp/{symbol}-pricehistory.pickle\", \"rb\") as f:\n data = {k.isoformat(): v for k, v in pickle.load(f).items()}\n return data", "def _fetch_data(url: str, d: datetime) -> pd.DataFrame:\n return pd.read_json(url)", "def get_existing_data_for_ticker(ticker):\n filename = get_filename_for_ticker(ticker)\n logger.debug(f'Processing {filename}')\n df_ticker_data = pd.DataFrame()\n try:\n df_ticker_data = pd.read_csv(filename, index_col='Date')\n df_ticker_data.index = pd.to_datetime(df_ticker_data.index)\n except FileNotFoundError:\n logger.error(f'Error in opening {filename}')\n except Exception as e:\n logging.error(f'Error {e} while accessing existing data')\n return df_ticker_data", "def portfolio_download_data(tickers: List[str], dates: List[str],\n time_step: str) -> None:\n\n try:\n function_name: str = portfolio_download_data.__name__\n download_data_tools \\\n .function_header_print_data(function_name, tickers, dates,\n time_step)\n\n init_year = int(dates[0].split('-')[0])\n init_month = int(dates[0].split('-')[1])\n fin_year = int(dates[1].split('-')[0])\n fin_month = int(dates[1].split('-')[1])\n last_day = monthrange(fin_year, fin_month)[1]\n\n init_date: dt = dt(year=init_year, month=init_month, day=1)\n fin_date: dt = dt(year=fin_year, month=fin_month, day=last_day)\n\n # Not all the periods can be combined with the time steps.\n raw_data: pd.DataFrame = \\\n yf.download(tickers=tickers, start=init_date, end=fin_date,\n interval=time_step)['Adj Close']\n # Order DataFrame columns by sector\n raw_data = raw_data[tickers]\n\n if raw_data.isnull().values.any():\n # Remove stocks that do not have data from the initial date\n raw_data = raw_data.dropna(axis=1, thresh=len(raw_data) - 10) \\\n .fillna(method='ffill')\n\n download_data_tools.save_data(raw_data, dates, time_step)\n\n except AssertionError as error:\n print('No data')\n print(error)", "def get_stock_df(instrument_id: str\n , start_date: any\n , end_date: any\n , load_from_cache=True\n , save_to_cache=True\n , cache_dir=_DEFAULT_CACHE_DIR\n , return_only_df=True):\n\n if not instrument_id.startswith('HEX'):\n raise ValueError(f'Invalid instrument name {instrument_id}')\n\n _validate_dates(start_date, end_date)\n\n # Convert datetimes to ISO format strings if they aren't already.\n start_date = start_date.isoformat()[:10] if isinstance(start_date, datetime) else start_date\n end_date = end_date.isoformat()[:10] if isinstance(end_date, datetime) else end_date\n\n file_path = _get_instrument_cache_file_path(instrument_id, start_date, end_date, cache_dir)\n\n if load_from_cache or save_to_cache:\n _create_dir_if_not_exists(cache_dir)\n\n if load_from_cache:\n cache_files = os.listdir(cache_dir)\n cached_file = _get_cached_instrument_file(cache_files, instrument_id, start_date,\n end_date)\n\n if cached_file is not None:\n cached_file = os.path.join(_DEFAULT_CACHE_DIR, cached_file)\n _log.info('Loading from cache')\n\n with open(cached_file, 'rb') as f:\n cached_data = pickle.load(f)\n return cached_data['Value'] if return_only_df else cached_data\n\n params = {\n 'SubSystem' : 'History',\n 'Action' : 'GetChartData',\n 'FromDate' : start_date,\n 'ToDate' : end_date,\n 'json' : True,\n 'showAdjusted' : True,\n 'app' : '/osakkeet/historiallisetkurssitiedot-HistoryChar',\n 'DefaultDecimals': False,\n 'Instrument' : instrument_id\n }\n\n r = requests.get(_API_URL, params)\n json_result = 
r.json()\n status = int(json_result['@status'])\n if status != 1:\n raise ValueError(f'Invalid status {status} or instrument {instrument_id}')\n\n json_data = json_result['data'][0]\n json_stock_name = json_data['instData']['@nm']\n json_company_name = json_data['instData']['@fnm']\n\n json_stock_value = json_data['chartData']['cp']\n\n # Create the stock DataFrame.\n pd_stock_value = pd.DataFrame(json_stock_value, columns=['Timestamp', 'Value'])\n\n # Create epoch timestamp and datetime columns.\n timestamps = pd_stock_value['Timestamp'].values // 1000\n pd_stock_value.loc[:, 'Timestamp'] = timestamps\n timestamps_dt = [datetime.fromtimestamp(x) for x in timestamps]\n pd_stock_value['DateTime'] = pd.to_datetime(timestamps_dt)\n\n result = {\n 'Company': json_company_name,\n 'Stock' : json_stock_name,\n 'Value' : pd_stock_value\n }\n\n if save_to_cache:\n _log.info('Storing to cache')\n with open(file_path, 'wb+') as f:\n pickle.dump(result, f)\n\n return result['Value'] if return_only_df else result", "def get_data(symbols, dates, base_dir=\"../data/\"):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n temp_df = pd.read_csv(symbol_to_path(symbol, base_dir), index_col='Date',\n parse_dates=True, usecols=['Date', 'Close'],\n na_values=['nan'])\n temp_df = temp_df.rename(columns={'Close': symbol})\n df = df.join(temp_df, how='inner')\n return df", "def get_data(stockSymbol, full_data=False, start_date=None, end_date=None, check_stockSymbol=True):\n\n if(check_stockSymbol is True):\n check_name(stocks_values, stocks_values, stockSymbol)\n\n stockSymbol = stockSymbol.replace('&', '%26')\n symbolCount = scrape_symbolCount(stockSymbol)\n\n if(full_data is True):\n\n print(\"Downloading Full data for\", stockSymbol)\n x = datetime.datetime.strptime('1-1-1992', \"%d-%m-%Y\")\n y = datetime.datetime.today()\n\n else:\n\n if(start_date is None or end_date is None):\n raise ValueError(\"Provide start and end date.\")\n\n x = parse_date(start_date)\n y = parse_date(end_date)\n\n if(x > y):\n raise ValueError(\"Starting date is greater than end date.\")\n\n result = scrape_data(\n x, y, 'stock', stockSymbol=stockSymbol, symbolCount=symbolCount)\n return result", "def download_stock_price_hist(\n\ttickers = [ 'AAPL' ],\n\tprice_column = 'Adj Close',\t\t\t\t\t\t\t\t# assume it's the Adjusted Close price that are interested\n\tstart = datetime.date( 2009, 12, 31 ),\t\t\t\t# assume start is guaranteed to be a weekday\n\tend = datetime.date( 2015, 12, 31 ),\n\tcsv_file = \"stock_price_test.csv\",\n):\n\t# Check validity of inputs\n\tif len( tickers ) <= 0:\n\t\tprint \"Tickers must not be empty\";\n\t\treturn False;\n\tif start > end:\n\t\tprint \"Start date \" + start.isoformat() + \" can't be later than End date \" + end.isoformat();\n\n\tdf = pd.DataFrame();\t\t\t# data frame to return\n\tfor _i in range( len(tickers) ):\n\t\tticker = tickers[_i];\n\t\tprint \"Index\" + str(_i) + \"\\t\" + \"Ticker: \" + ticker;\n\n\t\tstart_str = start.isoformat();\n\t\tend_str = end.isoformat();\n\t\thist = ystockquote.get_historical_prices( ticker, start_str, end_str );\t# dictionary with date string as the key\n\n\t\t# Get time series of stock prices (Don't sort before forming the Series!!!)\n\t\tdate_index = [];\n\t\tprice_data = [];\n\t\tfor key, val in hist.iteritems():\n\t\t\tdate_index.append( datetime.datetime.strptime( key, \"%Y-%m-%d\" ).date() );\n\t\t\tprice_data.append( float( val[ price_column ] ) )\n\n\t\tif min( date_index ) > 
start:\t\t\t\t\t\t\t\t# Pass if the no stock price is available on Start\n\t\t\tcontinue;\n\t\tstock_ts = pd.Series( price_data, date_index );\n\t\tstock_ts = stock_ts.sort_index();\n\n\t\t# Add current stock TS to the DataFrame\n\t\tdf[ticker] = stock_ts;\n\t\n\tdf.to_csv( csv_file, index_label='Date' );\n\treturn True;", "def updateStockInfo(df, date):\n date = getLastBDay(date)\n\n for index, row in df.iterrows():\n try:\n stockData = data.DataReader(row['Symbol'],\n 'yahoo',\n date,\n date)\n floatNum = df.loc[df['Symbol']==row['Symbol'], ['Float']].values[0]\n\n df.at[index,'Last Sale'] = stockData['Close']\n df.at[index,'Market Cap'] = stockData['Close'] * float(floatNum) if floatNum else 0\n df.at[index, 'Has Data'] = 1\n except (RemoteDataError, KeyError):\n df.at[index, 'Has Data'] = 0\n print('No Data found for {0}'.format(index))\n return [df.to_dict()]", "def get_data(retrieve = False, start='2019-01-01', comp = False):\r\n if retrieve == True:\r\n tickers = retrieve_sp500()\r\n else:\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n if not os.path.exists('sp500_data'):\r\n os.mkdir('sp500_data')\r\n exchg_close = dt.time(16,0,0,0)\r\n # use todays date if markets have closed.\r\n if dt.datetime.today().time() > exchg_close:\r\n end = dt.datetime.now()\r\n # use yesterdays dates if markets have not yet closed.\r\n else: \r\n end = dt.datetime.now() - dt.timedelta(1)\r\n for ticker in tickers:\r\n # updates data for tickers not currently stored.\r\n if not os.path.exists('sp500_data/{}.csv'.format(ticker)):\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # updates data for tickers that have not been updated today.\r\n elif dt.datetime.fromtimestamp(os.path.getmtime('sp500_data/{}.csv'.format(ticker))).day != dt.datetime.today().day:\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # prints out data that was not and does not need udpating.\r\n else:\r\n print('{} is already saved'.format(ticker))\r\n if comp == True:\r\n compile_data()", "def get_stock_historical_data(symbol, config):\n start_date_dt = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n end_date_dt = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n\n filename = '{}.csv'.format(symbol)\n dir_path = DATA_DIR / 'nasdaq_historical' / '{}_{}'.format(config['start_date'], config['end_date'])\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n full_path = dir_path / filename\n if os.path.exists(full_path):\n df = pd.read_csv(full_path, header=0, index_col='Date', parse_dates=True)\n else:\n df = web.get_data_yahoo(symbol, config['start_date'], config['end_date'])\n if df.index[0].date() == start_date_dt and df.index[-1].date() == end_date_dt:\n df.to_csv(full_path)\n\n df.attrs.update(config)\n df.attrs['stock_symbol'] = symbol\n df.attrs['start_date_dt'] = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n df.attrs['end_date_dt'] = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n df.attrs['date_dir'] = '{}_{}'.format(config['start_date'], config['end_date']) # for building paths\n return df", "def get_tickers():\n\turl = \"https://api.iextrading.com/1.0/ref-data/symbols\"\n\t\n\ttry:\n\t\tresponse = requests.get(url)\n\t\tif str(response.status_code) == \"200\":\n\t\t\tprint(\"[UPDATE]: Downlaoding Tickers from iextrading API\")\n\t\t\tjson_stock_data = response.json()\n\n\t\t\tpd_stock = 
pandas.DataFrame(json_stock_data)\n\t\t\t# DataFrame Format\n\t\t\t# date iexId isEnabled name symbol type\n\t\t\t# 0 2019-02-12 2 True Agilent Technologies Inc. A cs\n\n\t\t\tprint(\"[SUCCESS]: Downloaded {} symbols from IEX.\".format(len(pd_stock.index)))\n\n\t\t\treturn pd_stock\n\n\t\telse:\n\t\t\tprint(\"[ERROR]: Download from IEX failed.\")\n\t\t\treturn \"ERROR\"\n\texcept Exception as e:\n\t\tprint(\"[ERROR]: {}\".format(e))\n\t\treturn \"ERROR\"", "def download_entire_db(storage_path=DEFAULT_STORAGE,\n remove_previous=True,\n return_df=False,\n return_latest_date=False,\n write=['feather']):\n # first check if we have the latest data\n if not os.path.exists(storage_path):\n splitpath = storage_path.split('/')[1:] # first entry is blank due to home dir /\n for i, p in enumerate(splitpath, 1):\n path = '/'.join(splitpath[:i])\n if not os.path.exists(path):\n os.mkdir(path)\n\n zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/data?api_key=' + Q_KEY\n s = req.Session()\n s.mount('https', HTTPAdapter(max_retries=10))\n r = s.get(zip_file_url)\n # another possible way to deal with retries\n # while True:\n # try:\n # r = req.get(zip_file_url, timeout=10)\n # break\n # except Exception as e:\n # print(e)\n\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path=storage_path)\n df = pd.read_csv(storage_path + \\\n z.filelist[0].filename,\n names=HEADERS,\n index_col=1,\n parse_dates=True,\n infer_datetime_format=True)\n latest_date = df.index.max().date().strftime('%Y%m%d')\n\n if 'hdf5' in write:\n df.to_hdf(storage_path + 'EOD_' + latest_date + '.h5',\n key='data',\n complib='blosc',\n complevel=9)\n\n # also write feather file so can read into R\n # have to reset the index because feather can't handle non-default index (maybe non-unique?)\n df.reset_index(inplace=True)\n if 'feather' in write:\n df.to_feather(storage_path + 'EOD_' + latest_date + '.ft')\n\n if remove_previous:\n for ext in ['h5', 'ft']:\n files = glob.glob(storage_path + 'EOD_*.' 
+ ext)\n files = [f for f in files if len(f.split('/')[-1]) == 15] # don't want any of the small files, only full DBs\n print(sorted(files, key=os.path.getctime))\n if len(files) > 1:\n previous_file = sorted(files, key=os.path.getctime)[-2]\n print('removing', previous_file)\n os.remove(previous_file)\n\n # delete downloaded zip file\n os.remove(storage_path + z.filelist[0].filename)\n\n if return_df:\n # set index back to normal for return_df\n df.set_index('Date', inplace=True)\n return df\n elif return_latest_date:\n return pd.to_datetime(df['Date'].max().date())", "def read_historical_data():\n pull_dir = \"/common/quidel-historical-raw\"\n columns = ['SofiaSerNum', 'TestDate', 'Facility', 'ZipCode',\n 'FluA', 'FluB', 'StorageDate']\n df = pd.DataFrame(columns=columns)\n\n for fn in os.listdir(pull_dir):\n if \"xlsx\" in fn:\n newdf = pd.read_excel(\"/\".join([pull_dir, fn]))\n df = df.append(newdf[columns])\n return df", "def get_stock_data():\n if not os.path.exists('./catalog/stock_data'):\n os.mkdir('./catalog/stock_data')\n \n inventory_data = {}\n inventory_file = './catalog/stock_data/inventory-bro.txt'\n \n download_data = True\n if os.path.exists(inventory_file):\n # Check that inventory file is no more than 1 day old\n filestat = os.stat(inventory_file)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n # Get inventory data from ftp site\n from ftplib import FTP_TLS\n print 'Downloading inventory-bro.txt ....'\n ftps = FTP_TLS('ftp.appareldownload.com')\n ftps.login('Br0d3r', 'Br0d3r2oll')\n ftps.prot_p()\n #ftps.retrlines('LIST')\n ftps.retrbinary('RETR inventory-bro.txt', open(inventory_file, 'wb').write)\n ftps.quit()\n \n print \"Parse inventory-bro.txt ... 
\"\n first_row = None\n for row in csv.reader(open(inventory_file, 'rb')):\n itemRef = row[4].lower()\n if itemRef == 'style number':\n # save first row to be used as column header\n first_row = row\n continue\n \n source_attribs = [{'attribute_type': 'source', 'attribute_value': 'broderbros'}]\n \n inventory_data.setdefault(itemRef, [])\n \n color = row[8].lower()\n size = row[10].lower()\n \n # Warehouses starts at column 13\n for i in range(13, len(first_row)):\n wh_name = first_row[i]\n options = [\n {'option_type': 'color', 'option_value': color, 'attributes': []},\n {'option_type': 'size', 'option_value': size, 'attributes': []},\n {'option_type': 'warehouse', 'option_value': wh_name, 'attributes': source_attribs, 'shared': True},\n {'option_type': 'vendor', 'option_value': 'broderbros', 'attributes': source_attribs, 'shared': True},\n ]\n inventory_data[itemRef].append({'options': options, 'inventory': row[i]})\n \n # Pricing data\n pricing_tarfile = \"./catalog/stock_data/bro-AllStyles_R06.tar.gz\"\n download_data = True\n if os.path.exists(pricing_tarfile):\n # Check that file is no more than 1 day old\n filestat = os.stat(pricing_tarfile)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n print 'Downloading items.csv for price data ....'\n br = utils.create_browser(1, 2)\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n try:\n tar_url = \"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, pricing_tarfile)\n except:\n print \"Error when downloading pricing file\"\n return None\n \n try:\n tar = tarfile.open(pricing_tarfile)\n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall('catalog/stock_data/bro-AllStyles_R06')\n tar.close()\n except:\n print \"Error when extracting items.csv\"\n return None\n \n f_object = open('./catalog/stock_data/bro-AllStyles_R06/items_R06.csv', 'rb')\n #~ f_object = open('items_R06.csv', 'rb')\n \n print \"Parse items_R06.csv ... 
\"\n for row in csv.reader(f_object):\n itemRef = row[7].lower()\n if itemRef == 'style code':\n continue\n \n size = row[8].lower()\n color = row[11].lower()\n price = row[18]\n \n item_data = inventory_data.get(itemRef)\n if not item_data:\n continue\n # Find data with same size and color\n for var_dict in item_data:\n options = var_dict['options']\n opt_dict = {}\n for opt in options:\n opt_type = opt['option_type']\n opt_value = opt['option_value']\n if opt_type == 'size':\n opt_dict['size'] = opt_value\n elif opt_type == 'color':\n opt_dict['color'] = opt_value\n if opt_dict['size'] == size and opt_dict['color'] == color:\n var_dict['price'] = [{'price_type': 'retail_price', 'price': price}]\n \n f_object.close()\n \n try:\n shutil.rmtree(\"./catalog/stock_data/bro-AllStyles_R06\")\n #~ os.remove(\"./catalog/stock_data/bro-AllStyles_R06.tar.gz\")\n except:\n pass\n \n return inventory_data", "def ShyRetrieve(symbol, minDate=None, downloadMissing=None):\n import os\n for file in os.listdir(\"Cache\"):\n if file == f\"{symbol}.csv\":\n print(f\"Parsing {symbol} from local drive.\")\n stock = Stock.ParseCSV(\"Cache/\" + file)\n if len(stock.history) != 0 and (minDate == None or stock.history[-1].date >= minDate):\n return stock\n\n def okayToDownload():\n if minDate != None:\n return True\n if downloadMissing != None:\n return downloadMissing\n print(f\"{symbol} not found in local drive. Okay to download from yfinance? (y/n)\")\n response = input()\n if response.lower() == \"y\":\n return True\n elif response.lower() == \"n\":\n return False\n else:\n print(\"Please respond \\\"y\\\" or \\\"n\\\".\")\n return okayToDownload()\n\n if okayToDownload():\n #stock = Stock.FromYfinance(symbol=symbol)\n stock = Stock(symbol=symbol)\n stock.Update()\n stock.SaveToCSV()\n print(f\"{stock.name} downloaded from yfinance API.\")\n return stock\n else:\n return Stock(symbol=symbol)", "def url2pandas(data_url, product, num_request_blocks):\n\n response = requests.get(data_url) # Get JSON data from URL\n json_dict = response.json() # Create a dictionary from JSON data\n\n df = pd.DataFrame() # Initialize a empty DataFrame\n\n # Error when the requested begin_date and/or end_date does not have data\n large_data_gap_error = 'No data was found. This product may not be offered at this station at the requested time.'\n\n # Handle coops.get_data() request size & errors from COOPS API, cases below:\n # 1. coops.get_data() makes a large request (i.e. >1 block requests)\n # and an error occurs in one of the individual blocks of data\n\n # 2. coops.get_data() makes a large request (i.e. >1 block requests)\n # and an error does not occur in one of the individual blocks of data\n\n # 3. coops.get_data() makes a small request (i.e. 1 request)\n # and an error occurs in the data requested\n\n # 4. coops.get_data() makes a small request (i.e. 
1 request)\n # and an error does not occur in the data requested\n\n # Case 1\n if (num_request_blocks > 1) and ('error' in json_dict): \n error_message = json_dict['error'].get('message',\n 'Error retrieving data')\n error_message = error_message.lstrip()\n error_message = error_message.rstrip()\n\n if error_message == large_data_gap_error:\n return df # Return the empty DataFrame\n else:\n raise ValueError(\n json_dict['error'].get('message', 'Error retrieving data'))\n\n # Case 2\n elif (num_request_blocks > 1) and ('error' not in json_dict):\n if product == 'predictions':\n key = 'predictions'\n else:\n key = 'data'\n\n df = json_normalize(json_dict[key]) # Parse JSON dict into dataframe\n\n return df\n\n # Case 3\n elif (num_request_blocks == 1) and ('error' in json_dict):\n raise ValueError(\n json_dict['error'].get('message', 'Error retrieving data'))\n \n # Case 4\n else:\n if product == 'predictions':\n key = 'predictions'\n else:\n key = 'data'\n\n df = json_normalize(json_dict[key]) # Parse JSON dict into dataframe\n\n return df" ]
[ "0.7036214", "0.67121357", "0.6564997", "0.65192896", "0.6485223", "0.6379464", "0.6363341", "0.62925714", "0.625687", "0.62474936", "0.62401634", "0.62270105", "0.62196875", "0.6214444", "0.616288", "0.6152651", "0.610115", "0.6098597", "0.6087366", "0.6072621", "0.60664415", "0.60423094", "0.6033076", "0.60290426", "0.60206187", "0.6007045", "0.5995049", "0.59861046", "0.5976573", "0.5965856" ]
0.6932429
1
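The record above pairs the freshness-check description with a Quandl-based implementation. A minimal sketch of the same load-or-refresh idea, assuming only pandas is available and using a hypothetical download_fresh callable in place of quandl.get:

import os
import datetime
import pandas as pd

def load_or_refresh(symbol, download_fresh, cache_dir='../stockdata', max_age_days=1):
    # Return cached EOD data for `symbol`, re-downloading only when the cache looks stale.
    path = os.path.join(cache_dir, symbol + '.csv.gz')
    if os.path.exists(path):
        cached = pd.read_csv(path, index_col=0, parse_dates=True)
        age = datetime.date.today() - cached.index[-1].date()
        if age <= datetime.timedelta(days=max_age_days):
            return cached  # recent enough, skip the download
    fresh = download_fresh(symbol)  # hypothetical downloader returning a DataFrame
    fresh.to_csv(path, compression='gzip')
    return fresh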
Makes a smaller h5 file with only the data after a specific time. For example, shortsqueeze only has historic data for the last 2 years, so that is a typical cutoff.
def make_small_df(storage_path=DEFAULT_STORAGE, filename='EOD_{}.h5', latest_eod=None, earliest_date='20150101'):
    if latest_eod is None:
        latest_eod = get_latest_eod()

    eod_datapath = storage_path + filename.format(latest_eod)
    new_filename = storage_path + filename.format(earliest_date + '_' + latest_eod)
    full_df = pd.read_hdf(eod_datapath, names=HEADERS)
    # hmm, seemed to need this before, not anymore
    # full_df['Date'] = pd.to_datetime(full_df['Date'], format='%Y-%m-%d')
    full_df = full_df[full_df.index > pd.to_datetime(earliest_date, format='%Y%m%d')]
    full_df.to_hdf(new_filename, key='data', complib='blosc', complevel=9)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_aggregated_ds_v2(file_src,file_dest):\n groups = {'2015_2016' : [2015,2016],'2017' : [2017],'2018' : [2018]}\n # Print fields of source file.\n with h5py.File(file_src,'r') as f :\n for k in [k for k in f.keys()] :\n print('k',k)\n #print(f'{k}: {f[k].dtype}')\n print('start creating the final .h5 file')\n # Create h5.\n with h5py.File(file_src,'r') as fs,h5py.File(file_dest,'w') as fd :\n for group_name,years in groups.items() :\n grp = fd.create_group(group_name)\n\n # Find examples of the specified years.\n indices = np.zeros_like(fs['year'][:],dtype='bool')\n for year in years :\n indices = np.logical_or(fs['year'][:] == year,indices)\n print('indices',year,indices.shape,indices.sum())\n # Find examples that don't have nans.\n indices[np.any(np.isnan(fs['py_S'][:]),axis=1)] = 0\n indices[np.isnan(fs['sigma0'][:])] = 0\n indices[np.isnan(fs['normalizedVariance'][:])] = 0\n # Done\n num_examples = indices.sum()\n print(f'Found {num_examples} events from years: ',years)\n\n # Write data from this year.\n # print(fs['year'][indices].shape)\n grp.create_dataset('year',data=fs['year'][indices])\n\n # Get 22 CWAVE features.\n #cwave = np.hstack([fs['py_S'][indices,...],fs['sigma0'][indices].reshape(-1,1),\n # fs['normalizedVariance'][indices].reshape(-1,1)])\n #cwave = preprocess.conv_cwave(cwave) # Remove extrema, then standardize with hardcoded mean,vars.\n cwave = fs['cwave'][indices,...]\n grp.create_dataset('cwave',data=cwave)\n\n # Additional features.\n dx = preprocess.conv_dx(fs['dx'][indices]) #I keep the normalisation here for dx and dt\n dt = preprocess.conv_dt(fs['dt'][indices])\n grp.create_dataset('dxdt',data=np.column_stack([dx,dt]))\n\n latSAR = fs['latSAR'][indices]\n lonSAR = fs['lonSAR'][indices]\n latSARcossin = preprocess.conv_position(latSAR) # Gets cos and sin\n lonSARcossin = preprocess.conv_position(lonSAR)\n grp.create_dataset('latlonSAR',data=np.column_stack([latSAR,lonSAR]))\n grp.create_dataset('latlonSARcossin',data=np.hstack([latSARcossin,lonSARcossin]))\n print('timeSAR',fs['timeSAR'].shape)\n timeSAR = fs['timeSAR'][:].squeeze()[indices]\n #todSAR = preprocess.conv_time(timeSAR)\n todSAR = fs['todSAR'][:].squeeze()[indices]\n grp.create_dataset('timeSAR',data=timeSAR,shape=(timeSAR.shape[0],1))\n grp.create_dataset('todSAR',data=todSAR,shape=(todSAR.shape[0],1))\n\n incidence = preprocess.conv_incidence(fs['incidenceAngle'][indices]) # Separates into 2 var.\n grp.create_dataset('incidence',data=incidence)\n\n satellite = fs['satellite'][indices]\n grp.create_dataset('satellite',data=satellite,shape=(satellite.shape[0],1))\n\n # Altimeter\n hsALT = fs['hsALT'][:].squeeze()[indices]\n grp.create_dataset('hsALT',data=hsALT,shape=(hsALT.shape[0],1))\n\n # Get spectral data.\n x = np.stack((preprocess.conv_real(fs['cspcRe'][indices,...]),\n preprocess.conv_imaginary(fs['cspcIm'][indices,...]),\n ),\n axis=3)\n grp.create_dataset('spectrum',data=x)\n print(f'Done with {years}')\n print('Done')", "def trim_myogram(raw_data, path, slicing_index='Stim'):\n\t# Collect data\n\tvolt_data = []\n\tstim_data = []\n\tslices_begin_time = []\n\tglobal title\n\n\t# data processing\n\ttitle_stim = 'Stim'\n\ttitle_rmg = 'RMG'\n\ttitle_rta = 'RTA'\n\tfor index, data_title in enumerate(raw_data['titles']):\n\t\tdata_start = int(raw_data['datastart'][index]) - 1\n\t\tdata_end = int(raw_data['dataend'][index])\n\t\tfloat_data = [round(float(x), 3) for x in raw_data['data'][0][data_start:data_end]]\n\t\tif title_rmg in data_title:\n\t\t\tvolt_data = float_data\n\t\t# if 
title_rta in data_title:\n\t\t# \tvolt_data = float_data\n\t\tif title_stim in data_title:\n\t\t\tstim_data = float_data\n\n\t# convert_bio_to_hdf5(volt_data, stim_data, path)\n\n\timport h5py as hdf5\n\t# with hdf5.File(path + \".hdf5\") as file:\n\t\t# for k,v in file.items():\n\t\t\t# print(k, v[:])\n\n\t# find peaks in stimulations data\n\tms_pause = 0\n\tbio_step = 0.25\n\t# print(\"stim_data = \", stim_data)\n\tfor index in range(1, len(stim_data) - 1):\n\t\tif stim_data[index - 1] < stim_data[index] > stim_data[index + 1] and ms_pause <= 0 and\\\n\t\t\t\tstim_data[index] > 0.5:\n\t\t\tslices_begin_time.append(index) # * real_data_step # division by 4 gives us the normal 1 ms step size\n\t\t\tms_pause = int(3 / bio_step)\n\t\tms_pause -= 1\n\t# print(\"slices_begin_time = \", slices_begin_time)\n\t# remove unnecessary data, use only from first stim, and last stim\n\tvolt_data = volt_data[slices_begin_time[0]:slices_begin_time[-1]]\n\n\t# move times to the begin (start from 0 ms)\n\tslices_begin_time = [t - slices_begin_time[0] for t in slices_begin_time]\n\t# print(\"len(volt_data) = \", len(volt_data))\n\treturn volt_data, slices_begin_time", "def load_obstab_feedback_sliced(self, dataset='' , file ='' , datetime='' ):\n k = dataset \n F = file \n dt = datetime\n \n if dt != self.unique_dates[k][F]['up_to_dt_slice']:\n print(\"Error! the dit does not correspond to the dt I calculated in the previous loading! \")\n return 0\n \n logging.debug(\" === (Re)Load data for %s file %s counter %s\" , dataset, file, data[k][F][\"counter\"])\n print(blue + 'Memory used before reading data: ', process.memory_info().rss/1000000000 , cend)\n \n slice_size = self.slice_size\n \n file = data[k][F]['h5py_file']\n rts, ri = data[k][F][\"recordtimestamp\"][:] , data[k][F][\"recordindex\"][:]\n\n index_min = self.unique_dates[k][F]['indices'][dt]['low'] # here no offset since I am reading the original data \n ind = np.where(rts==dt)[0][0] # index of specific dt , I need the extremes indices of the next date_time after slicing \n \n try: \n up_to_dt_slice = rts[ind + slice_size ] # \n index_max = self.unique_dates[k][F]['indices'][up_to_dt_slice]['low'] # maximum index in the array of date_time to slice on\n update_index = True\n except:\n \"\"\" If the dt is too large, I take the whole array \"\"\"\n index_max = 1000000000000000\n update_index = False \n \n \n ####################\n # OBSERVATIONS TABLE\n #################### \n logging.debug ('*** Loading observations_table' )\n obs_tab = file['observations_table'] \n\n #print('CHECKING THE INDICES:::: ' , k , ' index_min ', index_min , ' index_max ', index_max )\n obs_dic= {} \n for ov in self.observations_table_vars:\n v = copy.deepcopy( obs_tab[ov][index_min:index_max ] )\n obs_dic[ov] = v \n data[k][F]['observations_table']= obs_dic \n\n ###########\n # ERA5FB\n ###########\n if k == 'era5_1' or k == 'era5_2':\n logging.debug('*** Loading era5fb ' )\n era5fb_tab = file['era5fb']\n fb_dic = {} \n for ov in self.era5fb_columns:\n try:\n v = copy.deepcopy( era5fb_tab[ov][index_min:index_max ] )\n fb_dic[ov] = v \n except:\n continue\n #print(\"CANNOT FIND \", ov ) \n \n data[k][F]['era5fb_tab']= fb_dic\n \n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n \n \"\"\" Updating the indices \"\"\" \n self.unique_dates[k][F]['index_offset'] = copy.deepcopy( self.unique_dates[k][F]['index_offset_next'] ) \n \n if update_index: \n self.unique_dates[k][F]['index_offset_next'] = index_max \n 
self.unique_dates[k][F]['up_to_dt_slice'] = up_to_dt_slice\n\n return 0", "def saveDailyBlobs():\n\n msgfile = '/users/global/cornkle/MCSfiles/blob_map_allscales_-50_JJAS_points_dominant.nc'\n msg = xr.open_dataarray(msgfile)\n\n # def first_nozero(array_like, axis):\n # array_like[array_like<16]= array_like[array_like<16]+24\n # return np.nanmin(array_like,axis=axis)\n\n msg.values[msg.values > 75] = np.nan\n msg.values[msg.values == 0] = np.nan\n\n for m in msg:\n if m['time.hour'].values >= 16:\n m.values[m > 0] = m['time.hour'].values\n else:\n m.values[m > 0] = m['time.hour'].values+24\n\n ### this is useful, it removes all pixels which got rain twice on a day\n md = msg.resample('24H', base=16, dim='time', skipna=True, how='min')\n\n md = md[(md['time.month'] >=6) & (md['time.month'] <=9)]\n\n md.values[md.values>23] = md.values[md.values>23]-24\n\n md.to_netcdf('/users/global/cornkle/MCSfiles/blob_map_allscales_-50_JJAS_points_dominant_daily.nc')", "def make_obstab_era5fb_dic(self, dataset = '' , date_time = '', File = ''):\n index_offset = self.unique_dates[dataset][File]['index_offset']\n \n # Removing the index_offset, which is defined only if any slicing was done \n index = self.unique_dates[dataset][File]['indices'][date_time]['low'] - index_offset\n index_up = self.unique_dates[dataset][File]['indices'][date_time]['up'] - index_offset\n \n obs_dic = {} \n for v in self.observations_table_vars:\n obs_dic[v] = data[dataset][File]['observations_table'][v][index:index_up]\n #print('v is : ', v )\n\n \"\"\" Loop over the obs_tab to find duplicates.\n I fill a dictionary for each distinct pressure level, and I put inside\n the observed_variable number.\n If the list lready contains the combination pressure level - observed variable,\n then the record is skipped \"\"\"\n\n indices = [] # these are the only non-duplicates to be kept\n\n already_selected = { }\n \n #print('starting the loop: ' , date_time, ' ' , dataset, ' ', index, ' ' , index_up)\n for p,var,val,ind in zip ( obs_dic['z_coordinate'] , obs_dic['observed_variable'],obs_dic['observation_value'] ,range(len(obs_dic['z_coordinate'])) ):\n #print(p,var,val,ind)\n #if date_time > 2354300000:\n # print('looping :::', var, ' ' , val, ' ' , ind , ' ', dataset, ' ' , index_up, ' ' , index, ' ', File)\n \n if self.only_std_plevels:\n if p not in self.std_plevs:\n continue \n\n \n if p not in already_selected.keys():\n already_selected[p] = []\n \n \n if np.isfinite(val):\n if var not in already_selected[p]:\n already_selected[p].append(var)\n indices.append(ind) # record to be kept\n else:\n pass\n else: # skipping nans\n pass\n\n #print('done with the loop')\n red_obs_dic = {} # dictionary for the reduced (removed duplicates) obs_tab\n for v in self.observations_table_vars:\n red_obs_dic[v] = obs_dic[v][indices]\n\n ''' Simply returns the proper format for ''null' value '''\n def get_null( tipo = ''):\n if tipo == np.int32 :\n void = 0\n elif tipo == np.float32 :\n void = 0.0\n elif tipo == np.bytes_ :\n void = b'nan'\n return void\n \n ''' Filling the feedback table. Only feednack for era5_1 and era5_2 are currently available. 
\n Reads the total number of possible columns from the dic_type_attributes dictionary.\n Era5_1 and era5_2 fb have different columns.\n If data for a variable is not available, it fills with the appropriate null value '''\n \n #print('making the era5fb ', date_time, ' ' , dataset)\n red_era5fb_dic = {}\n for v in self.era5fb_columns:\n tipo = self.dic_type_attributes['era5fb'][v]['type'] \n if dataset == 'era5_1' or dataset == 'era5_2':\n if v in data[dataset][File]['era5fb_tab'].keys(): \n red_era5fb_dic[v] = data[dataset][File]['era5fb_tab'][v][index:index_up][indices]\n else:\n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void) \n else: # no feedback for non era%-1 or era5_2 datasets \n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void)\n \n #print('done making_obstab_era5fb')\n \"\"\"\n try:\n if len(red_obs_dic['date_time']) > 2:\n print('yes')\n else:\n print('check') \n except:\n print('check')\n \"\"\" \n return red_obs_dic , red_era5fb_dic", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def test_003_not_enough_datetimes() -> None:\n df = 
generate_test_data()\n df = df.head(2)\n skim(df)", "def close(self):\n end_time = get_isotime()\n for entry in self.to_close:\n entry[\"end_time\"] = end_time\n self.h5.close()", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def fix_vanHove_dtime(vh_key,conn):\n\n # look up trk_key\n (trk_key,) = conn.execute(\"select track_key from vanHove_prams where comp_key = ?\"\n ,(vh_key,)).fetchone()\n # get avgerage dtime\n dtime = gen.avg_dtime(trk_key,conn)\n # set dtime attribute\n (fname,) = conn.execute(\"select fout from comps where comp_key = ? and function = 'vanHove'\"\n ,(vh_key,)).fetchone()\n Fin = h5py.File(fname,'r+')\n g = Fin[fd('vanHove',vh_key)]\n \n g.attrs['dtime'] = dtime\n\n Fin.close()\n del Fin", "def filter_time_slices(time_slices, apt_no, exp_no):\n # Removing the extraneous time slices\n if apt_no == '102A' and exp_no == '3':\n discard_ts = time_slices[\n (time_slices.phase == 'Not Found') & (time_slices.magnitude < 100)]\n time_slices = time_slices.ix[time_slices.index - discard_ts.index]\n\n elif apt_no == '603':\n print \"here\"\n # Likely power consumption of fridge is 110-150\n time_slices = time_slices[(time_slices.magnitude < 110) | (time_slices.magnitude > 150) &\n (time_slices.type == 'power')]\n # 25-26Nov\n if exp_no == '25-26Nov':\n time_slices = time_slices[time_slices.end_time < 1385404505]\n elif exp_no == '26-27Nov':\n time_slices = time_slices[time_slices.end_time < 1385492334]\n\n elif apt_no == '703':\n # Likely power consumption of fridge is 130-152\n fridge_ts = time_slices[(time_slices.magnitude >= 130) & (time_slices.magnitude <= 170) &\n (time_slices.type == 'power')]\n time_slices = time_slices.ix[time_slices.index - fridge_ts.index]\n\n # Likely power consumption of geyser > 2000 but on light phase > 1000\n geyser_ts = time_slices[(time_slices.magnitude > 1000) & (time_slices.type == 'light')]\n time_slices = time_slices.ix[time_slices.index - geyser_ts.index]\n\n # 26-27Nov\n if exp_no == '26-27Nov':\n washing_ts = time_slices[\n (time_slices.start_time >= 1385470967) & (time_slices.end_time <= 1385471880)]\n time_slices = time_slices.ix[time_slices.index - washing_ts.index]\n\n # 28-29Nov\n if exp_no == '28-29Nov':\n time_slices = time_slices[\n (time_slices.start_time < 1385646060) | (time_slices.end_time > 1385648143)]\n\n # Removing time slices with duration less than 30 seconds\n idx_list = []\n for idx in time_slices.index:\n start_time = time_slices.ix[idx]['start_time']\n end_time = time_slices.ix[idx]['end_time']\n magnitude = time_slices.ix[idx]['magnitude']\n\n time_diff = end_time - start_time\n\n if time_diff < 30 and magnitude < 80:\n print \"idx\", idx, \"time_diff\", time_diff, \"magnitude\", magnitude\n # Qualified for filtering it\n idx_list.append(idx)\n time_slices = time_slices.ix[time_slices.index - idx_list]\n\n return time_slices", "def write_h5(\n lk_file,\n output_filename,\n compression_level=5,\n omit_data=None,\n *,\n crop_time_range=None,\n verbose=False,\n):\n import h5py\n\n omit_data = {omit_data} if isinstance(omit_data, str) else omit_data\n h5_file = lk_file.h5\n\n with h5py.File(output_filename, \"w\") as out_file:\n\n def 
traversal_function(name, node):\n if omit_data and any([fnmatch(name, o) for o in omit_data]):\n if verbose:\n print(f\"Omitted {name} from export\")\n return\n\n if isinstance(node, h5py.Dataset):\n if node.dtype.kind == \"O\":\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action=\"ignore\",\n category=FutureWarning,\n message=\"Direct access to this field is deprecated\",\n )\n\n _write_cropped_metadata(\n lk_file, out_file, name, node, crop_time_range, verbose\n )\n else:\n _write_numerical_data(\n lk_file, out_file, name, node, compression_level, crop_time_range, verbose\n )\n\n else:\n out_file.create_group(f\"{name}\")\n out_file[name].attrs.update(node.attrs)\n\n h5_file.visititems(traversal_function)\n out_file.attrs.update(h5_file.attrs)", "def batch_download_h8(_date_,\\\n path_himawari,\\\n path_himawari_hdf5,\\\n dat_list = dat_list,\\\n llcrnrlon = x_ll,\\\n llcrnrlat = y_ll,\\\n urcrnrlon = x_ur,\\\n urcrnrlat = y_ur,\\\n dat_segment = dat_segment,\\\n dat_list_reso = dat_list_reso,\\\n hrit_list = hrit_list ,\\\n dat_listnum = dat_listnum,\n hrit_listb = hrit_listb,\\\n dat_listnuma = dat_listnuma,\n dat_listnumb = dat_listnumb,\\\n hrit_spa = hrit_spa,\\\n hrit_spb = hrit_spb,\n create_fd_internal = True):\n \n \n date_obj = [datetime.datetime.strptime(i,\"%Y%m%d_%H%M\") for i in _date_]\n date_obj1 = N.array([datetime.datetime.strftime(i,\"/%Y%m/%d/%H/\") for i in date_obj])\n \n final_out_filename_list = []\n path_himawari_hdf5_out_list = []\n for i, j in enumerate(_date_):\n start_time = datetime.datetime.now()\n \n print \"\\n\"\n print \"=\"*80\n \n # if True, creates a subdirectory within default directory to store h8 data\n if create_fd_internal == True:\n path_himawari_hdf5_out = os.path.join(path_himawari_hdf5, j)\n elif create_fd_internal == False:\n path_himawari_hdf5_out = path_himawari_hdf5\n else:\n os.sys.exit(\"Create folder internal or external options not given by user.\")\n \n # if ouput directory does not exist\n if os.path.exists(path_himawari_hdf5_out) == False:\n create_path_directories(path_himawari_hdf5_out)#creates directory for output\n \n file_name_if_dl = \"HS_H08_\"+ j +\"_PH_R20_S030405.hdf5\"\n \n #print os.path.join(path_himawari_hdf5_out, file_name_if_dl)\n # if processed himawari data for ceratin date is already downloaded, skip timestamp\n if os.path.exists(os.path.join(path_himawari_hdf5_out, file_name_if_dl)) == True:\n print \"\\n\"\n print file_name_if_dl, 'exists'\n final_out_filename_list.append(file_name_if_dl)\n path_himawari_hdf5_out_list.append(path_himawari_hdf5_out)\n continue\n \n # downloads the needed data through wget\n dat_bz = download_h8(j, date_obj1[i], path_himawari,\\\n dat_list = dat_list,\\\n dat_segment = dat_segment,\\\n dat_list_reso = dat_list_reso)\n \n # Checks the downloaded H8 list\n # If datalist is empty, skip timestamp and append on list\n if dat_bz[0] == \"nan\":\n path_himawari_hdf5_out_list.append(str(N.nan))\n final_out_filename_list.append(str(N.nan))\n \n print \"\\n\"\n print \"Skipping \"+j\n print \"Removing temporary data on \"+path_himawari\n os.system(\"rm \"+os.path.join(path_himawari,\"*\"))\n continue\n \n datbz_fnames = open_list_datbz2(path_himawari)\n \n # double checks data if downloaded\n datbz_fnames_final = check_datbz_files(datbz_fnames, j,\\\n dat_list = dat_list,\\\n dat_segment = dat_segment,\\\n dat_list_reso = dat_list_reso)\n \n \n if datbz_fnames_final[0] == \"nan\":\n path_himawari_hdf5_out_list.append(str(N.nan))\n 
final_out_filename_list.append(str(N.nan))\n \n print \"\\nSkipping \"+j\n print \"Removing temporary data on \"+path_himawari\n os.system(\"rm \"+os.path.join(path_himawari,\"*\"))\n continue\n \n # downsample and HRIT conversion\n hrit_fnames = preparation_himawari(j, datbz_fnames_final, path_himawari)\n \n # necessary preprocessing methods\n final_out_filename = whole_preprocess_H8(hrit_fnames,\\\n path_himawari,\\\n path_himawari_hdf5_out,\\\n llcrnrlon = llcrnrlon,\\\n llcrnrlat = llcrnrlat,\\\n urcrnrlon = urcrnrlon,\\\n urcrnrlat = urcrnrlat,\\\n dat_segment = dat_segment,\\\n hrit_list = hrit_list ,\\\n dat_listnum = dat_listnum,\\\n hrit_listb = hrit_listb,\\\n dat_listnuma = dat_listnuma,\\\n dat_listnumb = dat_listnumb,\\\n hrit_spa = hrit_spa,\\\n hrit_spb = hrit_spb)\n \n os.system(\"rm \"+os.path.join(path_himawari,\"*\"))# deletes all files in path_himawari \n print \"\\nTemporary files are deleted in \"+ path_himawari\n \n # summary list\n final_out_filename_list.append(final_out_filename)\n path_himawari_hdf5_out_list.append(path_himawari_hdf5_out)\n \n time1 = datetime.datetime.now()\n print('\\n\\n\\tDuration of whole process: {}'.format(time1 - start_time))\n\n \n return path_himawari_hdf5_out_list, final_out_filename_list", "def test_create():\n\n with tempfile.TemporaryDirectory() as td:\n fp = os.path.join(td, 'outputs.h5')\n\n with Outputs(fp, 'w') as f:\n f.meta = meta\n f.time_index = time_index\n\n with h5py.File(fp, 'r') as f:\n test_meta = pd.DataFrame(f['meta'][...])\n test_ti = f['time_index'][...]\n assert test_meta.shape == (100, 2)\n assert len(test_ti) == 8760\n\n assert f.attrs['package'] == 'reV'\n assert f.attrs['version'] == __version__", "def make_obslog(path):\n name_pattern = '^HI\\.\\d{8}\\.\\d{5}\\.fits$'\n\n # scan the raw files\n fname_lst = sorted(os.listdir(path))\n\n # prepare logtable\n logtable = Table(dtype=[\n ('frameid', 'i2'), ('fileid', 'S17'), ('imgtype', 'S3'),\n ('object', 'S20'), ('i2cell', 'bool'), ('exptime', 'f4'),\n ('obsdate', Time),\n ('deckname', 'S2'), ('filter1', 'S5'), ('filter2', 'S5'),\n ('nsat_1', 'i4'), ('nsat_2', 'i4'), ('nsat_3', 'i4'),\n ('q95_1', 'i4'), ('q95_2', 'i4'), ('q95_3', 'i4'),\n ])\n\n # prepare infomation to print\n pinfo = FormattedInfo(all_columns,\n ['frameid', 'fileid', 'imgtype', 'object', 'i2cell', 'exptime',\n 'obsdate', 'deckname', 'nsat_2', 'q95_2'])\n\n # print header of logtable\n print(pinfo.get_separator())\n print(pinfo.get_title())\n print(pinfo.get_separator())\n\n # start scanning the raw files\n prev_frameid = -1\n for fname in fname_lst:\n if not re.match(name_pattern, fname):\n continue\n fileid = fname[0:17]\n filename = os.path.join(path, fname)\n hdu_lst = fits.open(filename)\n # parse images\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n\n head0 = hdu_lst[0].header\n\n frameid = prev_frameid + 1\n\n # get obsdate in 'YYYY-MM-DDTHH:MM:SS' format\n date = head0.get('DATE-OBS')\n utc = head0.get('UTC', head0.get('UT'))\n obsdate = Time('%sT%s'%(date, utc))\n\n exptime = head0.get('ELAPTIME')\n i2in = head0.get('IODIN', False)\n i2out = head0.get('IODOUT', True)\n i2cell = i2in\n imagetyp = head0.get('IMAGETYP')\n targname = head0.get('TARGNAME', '')\n lampname = head0.get('LAMPNAME', '')\n\n if imagetyp == 'object':\n # science frame\n imgtype = 'sci'\n objectname = targname\n elif imagetyp == 'flatlamp':\n # flat\n imgtype = 'cal'\n objectname = '{} ({})'.format(imagetyp, lampname)\n elif imagetyp == 'arclamp':\n # arc lamp\n imgtype = 'cal'\n objectname = '{} 
({})'.format(imagetyp, lampname)\n elif imagetyp == 'bias':\n imgtype = 'cal'\n objectname = 'bias'\n else:\n print('Unknown IMAGETYP:', imagetyp)\n\n # get deck and filter information\n deckname = head0.get('DECKNAME', '')\n filter1 = head0.get('FIL1NAME', '')\n filter2 = head0.get('FIL2NAME', '')\n\n # determine the numbers of saturated pixels for 3 CCDs\n mask_sat1 = (mask_lst[0] & 4)>0\n mask_sat2 = (mask_lst[1] & 4)>0\n mask_sat3 = (mask_lst[2] & 4)>0\n nsat_1 = mask_sat1.sum()\n nsat_2 = mask_sat2.sum()\n nsat_3 = mask_sat3.sum()\n\n # find the 95% quantile\n q95_lst = [np.sort(data.flatten())[int(data.size*0.95)]\n for data in data_lst]\n q95_1, q95_2, q95_3 = q95_lst\n\n # close the fits file\n hdu_lst.close()\n\n item = [frameid, fileid, imgtype, objectname, i2cell, exptime, obsdate,\n deckname, filter1, filter2,\n nsat_1, nsat_2, nsat_3, q95_1, q95_2, q95_3]\n\n logtable.add_row(item)\n # get table Row object. (not elegant!)\n item = logtable[-1]\n\n # print log item with colors\n string = pinfo.get_format(has_esc=False).format(item)\n print(print_wrapper(string, item))\n\n prev_frameid = frameid\n\n print(pinfo.get_separator())\n\n # sort by obsdate\n #logtable.sort('obsdate')\n\n # determine filename of logtable.\n # use the obsdate of the LAST frame.\n obsdate = logtable[-1]['obsdate'].iso[0:10]\n outname = '{}.obslog'.format(obsdate)\n if os.path.exists(outname):\n i = 0\n while(True):\n i += 1\n outname = '{}.{}.obslog'.format(obsdate, i)\n if not os.path.exists(outname):\n outfilename = outname\n break\n else:\n outfilename = outname\n\n # save the logtable\n\n # loginfo is not pinfo because not all columns need to be printed in the\n # screen, but all columns should be written in logfile.\n loginfo = FormattedInfo(all_columns)\n outfile = open(outfilename, 'w')\n outfile.write(loginfo.get_title()+os.linesep)\n outfile.write(loginfo.get_dtype()+os.linesep)\n outfile.write(loginfo.get_separator()+os.linesep)\n for row in logtable:\n outfile.write(loginfo.get_format(has_esc=False).format(row)+os.linesep)\n outfile.close()", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, 
encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! \"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def generate_year_weather_data(directory, new_fname):\n ds_whole = concatenate_weather_files(directory)\n print(ds_whole.last())\n ds_whole.last().to_netcdf(directory+new_fname+\".nc\")", "def write_dict_h5(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):\n\n #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}\n #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }\n \n with h5py.File(dfile,mode) as fd:\n try:\n fd.create_group(k)\n index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')\n fd[k].create_dataset('index', data=index)\n except:\n pass\n if not var_selection:\n var_selection=list(f.keys())\n \n string10=numpy.zeros(fixed_string_len,dtype='S1')\n sdict={}\n slist=[]\n\n #groupencodings \n \n for v in var_selection: \n #variables_dic[v] = ''\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n \n if type(fvv[0]) not in [str,bytes,numpy.bytes_]:\n\n if fvv.dtype !='S1':\n \n 
fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}\n if v in attrs.keys():\n for kk,vv in attrs[v].items():\n if type(vv) is str: \n fd[k][v].attrs[kk]=numpy.bytes_(vv)\n else:\n fd[k][v].attrs[kk]=vv\n \n if v in ['date_time','report_timestamp','record_timestamp']:\n fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) \n \n else:\n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n slen=fvv.shape[1]\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n else:\n sleno=len(fvv[0])\n slen=sleno\n try:\n slen=int(fvv.dtype.descr[0][1].split('S')[1])\n except: \n pass\n\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n \n #x=x.reshape(fvv.shape[0],slen)\n fd[k].create_dataset(v,data=fvv.view('S1').reshape(fvv.shape[0],slen),compression=fbencodings[v]['compression'],chunks=True)\n if v in attrs.keys():\n fd[k][v].attrs['description'] =numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) \n \n #variables_dic[v] = f[v].values.dtype\n \n for v in fd[k].keys(): #var_selection:\n l=0 \n \n '''\n if v == 'primary_station_id':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n except:\n pass\n \n try:\n slen = len( fd[k][v][0] )\n stringa=numpy.zeros( slen , dtype='S1')\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa ) \n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n except:\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n \n \n if v == 'station_name':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n slen = len( fd[k][v][0][0])\n stringa=numpy.zeros( slen , dtype='S1')\n except:\n pass\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa )\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n print('done attaching')\n except:\n print('not working')\n \n ''' \n try:\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n if 'string' not in v and v!='index': \n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n #print(v,fvv.ndim,type(fvv[0]))\n if fvv.ndim==2 or type(fvv[0]) in [str,bytes,numpy.bytes_]:\n slen=sdict[v]\n #slen=10\n fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])\n except:\n pass\n \n \n \n i=4 \n for v in slist:\n s='string{}'.format(v)\n for a in ['NAME']:\n fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')\n \n i+=1\n \n return", "def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n 
dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()", "def soho_ephin_loader(startdate, enddate, resample=None, path=None, all_columns=False, pos_timestamp=None, use_uncorrected_data_on_own_risk=False):\n\n if not path:\n path = sunpy.config.get('downloads', 'download_dir') + os.sep\n\n # create list of files to load:\n dates = pd.date_range(start=startdate, end=enddate, freq='D')\n filelist = []\n for i, doy in enumerate(dates.day_of_year):\n if dates[i].year<2000:\n pre = \"eph\"\n yy = dates[i].year-1900\n else:\n pre = \"epi\"\n yy = dates[i].year-2000\n name = \"%s%02d%03d\" %(pre, yy, doy)\n try:\n file = glob.glob(f\"{path}{os.sep}{name}.rl2\")[0]\n except IndexError:\n print(f\"File {name}.rl2 not found locally at {path}.\")\n file = soho_ephin_download(dates[i], path)\n if len(file) > 0:\n filelist.append(file)\n if len(filelist) > 0:\n filelist = np.sort(filelist)\n\n col_names = ['Year', 'DOY', 'MS', 'S/C Epoch', 'Status Word part 1', 'Status Word part 2',\n 'E150', 'E300', 'E1300', 'E3000', 'P4', 'P8', 'P25', 'P41',\n 'H4', 'H8', 'H25', 'H41', 'INT',\n 'P4 GM', 'P4 GR', 'P4 S', 'P8 GM', 'P8 GR', 'P8 S',\n 'P25 GM', 'P25 GR', 'P25 S', 'P41 GM', 'P41 GR', 'P41 S',\n 'H4 GM', 'H4 GR', 'H4 S1', 'H4 S23', 'H8 GM', 'H8 GR', 'H8 S1', 'H8 S23',\n 'H25 GM', 'H25 GR', 'H25 S1', 'H25 S23', 'H41 GM', 'H41 GR', 'H41 S1', 'H41 S23',\n 'Status Flag', 'Spare 1', 'Spare 2', 'Spare 3']\n\n # read files into Pandas dataframes:\n df = pd.read_csv(filelist[0], header=None, sep=r'\\s+', names=col_names)\n if len(filelist) > 1:\n for file in filelist[1:]:\n t_df = pd.read_csv(file, header=None, sep=r'\\s+', names=col_names)\n df = pd.concat([df, t_df])\n\n # # generate datetime index from year, day of year, and milisec of day:\n df.index = doy2dt(df.Year.values, df.DOY.values + df.MS.values/1000./86400.)\n df.index.name = 'time'\n\n # drop some unused columns:\n if not all_columns:\n df = df.drop(columns=['Year', 'DOY', 'MS', 'S/C Epoch',\n 'Status Word part 1', 'Status Word part 2',\n 'P4 GM', 'P4 GR', 'P4 S',\n 'P8 GM', 'P8 GR', 'P8 S',\n 'P25 GM', 'P25 GR', 'P25 S',\n 'P41 GM', 'P41 GR', 'P41 S',\n 'H4 GM', 'H4 GR', 'H4 S1', 'H4 S23',\n 'H8 GM', 'H8 GR', 'H8 S1', 'H8 S23',\n 'H25 GM', 'H25 GR', 'H25 S1', 'H25 S23',\n 'H41 GM', 'H41 GR', 'H41 S1', 'H41 S23',\n 'Spare 1', 'Spare 2', 'Spare 3'])\n\n # Proton and helium measurements need to be corrected for effects determined post-launch,\n # cf. chapter 2.3 of https://www.ieap.uni-kiel.de/et/ag-heber/costep/materials/L2_spec_ephin.pdf\n # Until this correction has been implemented here, these data products are set to -9e9.\n # Setting use_uncorrected_data_on_own_risk=True skips this replacement, so that the uncorrected\n # data can be obtained at own risk!\n if use_uncorrected_data_on_own_risk:\n warnings.warn(\"Proton and helium data is still uncorrected! 
Know what you're doing and use at own risk!\")\n else:\n df.P4 = -9e9\n df.P8 = -9e9\n df.P25 = -9e9\n df.P41 = -9e9\n df.H4 = -9e9\n df.H8 = -9e9\n df.H25 = -9e9\n df.H41 = -9e9\n\n # replace bad data with np.nan:\n # there shouldn't be bad data in rl2 files!\n # df = df.replace(-9999.900, np.nan)\n\n # derive instrument status and dependencies\n status = df['Status Flag'].values\n\n fmodes = np.zeros(len(status))\n for q in range(len(status)):\n binaries = '{0:08b}'.format(int(status[q]))\n if int(binaries[-1]) == 1:\n if int(binaries[-3]) == 1:\n fmodes[q] = 1\n else:\n fmodes[q] = 2\n\n ringoff = np.zeros(len(status))\n for q in range(len(status)):\n binaries = '{0:08b}'.format(int(status[q]))\n if int(binaries[-2]):\n ringoff[q] = 1\n\n cs_e300 = '0.67 - 3.0 MeV'\n cs_e1300 = '2.64 - 6.18 MeV'\n cs_p25 = '25 - 41 MeV'\n cs_he25 = '25 - 41 MeV/N'\n if max(fmodes)==1:\n cs_e1300 = \"2.64 - 10.4 MeV\"\n cs_p25 = '25 - 53 MeV'\n cs_he25 = '25 - 53 MeV/n'\n if max(fmodes)==2:\n warnings.warn('Careful: EPHIN ring off!')\n\n # failure mode D since 4 Oct 2017:\n # dates[-1].date() is enddate, used to catch cases when enddate is a string\n if dates[-1].date() >= dt.date(2017, 10, 4):\n cs_e300 = 'deactivated bc. of failure mode D'\n cs_e1300 = \"0.67 - 10.4 MeV\"\n # dates[0].date() is startdate, used to catch cases when startdate is a string\n if dates[0].date() <= dt.date(2017, 10, 4):\n warnings.warn('EPHIN instrument status (i.e., electron energy channels) changed during selected period (on Oct 4, 2017)!')\n\n # careful!\n # adjusting the position of the timestamp manually.\n # requires knowledge of the original time resolution and timestamp position!\n if pos_timestamp == 'center':\n df.index = df.index+pd.Timedelta('30s')\n\n # optional resampling:\n if isinstance(resample, str):\n df = resample_df(df, resample, pos_timestamp=pos_timestamp)\n else:\n df = []\n\n meta = {'E150': '0.25 - 0.7 MeV',\n 'E300': cs_e300,\n 'E1300': cs_e1300,\n 'E3000': '4.80 - 10.4 MeV',\n 'P4': '4.3 - 7.8 MeV',\n 'P8': '7.8 - 25 MeV',\n 'P25': cs_p25,\n 'P41': '41 - 53 MeV',\n 'H4': '4.3 - 7.8 MeV/n',\n 'H8': '7.8 - 25.0 MeV/n',\n 'H25': cs_he25,\n 'H41': '40.9 - 53.0 MeV/n',\n 'INT': '>25 MeV integral'}\n\n return df, meta", "def test_dhr():\n f = Level3File(get_test_data('nids/KOUN_SDUS54_DHRTLX_201305202016'))\n assert f.metadata['avg_time'] == datetime(2013, 5, 20, 20, 18)", "def save_data_to_hdf(time_period, save_filename):\n hdf_file = pd.HDFStore(os.path.join(conf.DATA_DIR,save_filename),'w')\n try:\n for i in time_period:\n input_file = \"disk_sample_smart_log_%s.csv\" % i\n df = pd.DataFrame()\n for sub_df in pd.read_csv(os.path.join(conf.DATA_DIR, input_file), chunksize=1e+5, index_col=0):\n df = pd.concat([df, sub_df]) \n logger.info('%s 的数据读入完成,开始准备标记' % i )\n df = tag_data(df)\n logger.info('%s 的数据标记完成,存入h5文件' % i )\n hdf_file.append(key='data', value=df,format='table', data_columns=True)\n del df\n logger.info('%s 的数据处理完成' % i )\n finally:\n hdf_file.close()", "def test_4d_stream_time():\n fmask = \"common_data/4d_pipe/full4D.fid\"\n dic,data = ng.pipe.read_lowmem(fmask)\n\n fname = \"common_data/4d_pipe/time_2index/test02006.fid\"\n sdic,sdata = ng.pipe.read(fname)\n\n assert data.shape == (8, 12, 16, 1400)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2,3].real,2) == -395.11\n assert round(data[0,1,2,3].imag,2) == 52.72\n assert round(data[5,9,11,987].real,2) == -35.09\n assert round(data[5,9,11,987].imag,2) == 33.07\n\n # check the slice\n assert sdata.shape == (16, 1400)\n assert 
sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == 75.93\n assert round(sdata[1,2].imag,2) == 5.55\n assert round(sdata[7,800].real,2) == -8.93\n assert round(sdata[7,800].imag,2) == -10.24\n\n # slice/data matching\n assert_array_equal(data[1,5],sdata)\n\n lowmem_write_readback(dic,data)", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def get_data(\n establishment_number=None,\n columns_to_drop=[],\n year_to_separate=2017,\n path_to_file=\"data/data_all.gz\",\n drop_na=False,\n drop_value_below_threshold=None,\n):\n date_col = \"Date\"\n visitors = \"visitors\"\n data = pd.read_csv(path_to_file, sep=\";\", decimal=\",\", index_col=\"id\")\n data[date_col] = pd.to_datetime(data[date_col])\n\n if drop_na:\n data.dropna(inplace=True)\n\n\n if drop_value_below_threshold is None:\n data.loc[data[visitors] < 0, visitors] = 0\n else:\n data = data[data[\"visitors\"] >= drop_value_below_threshold]\n\n\n if establishment_number is not None:\n data = data[data[\"library\"] == establishment_number]\n\n # Separate data in train and test DataFrame\n train = data[data.loc[:, date_col].dt.year < 
year_to_separate]\n test = data[data.loc[:, date_col].dt.year >= year_to_separate]\n\n # Drop columns\n to_drop = [date_col, \"Date:1\"] + columns_to_drop\n train.drop(to_drop, axis=1, inplace=True)\n test.drop(to_drop, axis=1, inplace=True)\n\n X_train = train.drop(visitors, axis=1)\n y_train = train[[visitors]]\n X_test = test.drop(visitors, axis=1)\n y_test = test[[visitors]]\n\n return X_train, y_train, X_test, y_test", "def save_as_hdf5(self, filename):", "def transform_to_h5():\n # this took about 10 minutes for set1\n for setname in ['set1', 'set2']:\n filename = os.path.join(ltrc_dirname, '{}.h5'.format(setname))\n f = h5py.File(filename, 'w')\n\n for name in ['train', 'valid', 'test']:\n g = f.create_group(name)\n filename = os.path.join(ltrc_dirname, '{}.{}.txt'.format(setname, name))\n X, y, q = load_svmlight_file(filename, query_id=True)\n g.create_dataset('X', data=X.todense(), compression='gzip')\n g.create_dataset('y', data=y, compression='gzip')\n g.create_dataset('q', data=q, compression='gzip')\n f.close()\n # Now you can do this\n # f['/valid/X'].shape\n # Out[24]: (71083, 699)", "def clean_and_save_timeseries(df):\n drop_columns = ['Lat', \n 'Long', \n 'Province/State']\n\n df.drop(columns=drop_columns, inplace = True)\n \n df_grouped = df.groupby(['Country/Region'], as_index=False).sum()\n df_grouped = df_grouped.set_index('Country/Region').transpose()\n df_grouped.reset_index(level=0, inplace=True)\n df_grouped.rename(columns={'index': 'Date'}, inplace=True)\n df_grouped['Date'] = pd.to_datetime(df_grouped['Date'])\n\n df_grouped.to_csv('../data/worldwide_timeseries.csv', index=False)", "def read_wxt_obs(years, time):\n\n met_vars = ['RH', 'Tair', 'press']\n vars = met_vars + ['time']\n filepath = ['C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + str(i) + '_15min.nc' for i in years]\n wxt_obs_raw = eu.netCDF_read(filepath, vars=vars)\n\n\n # set up array to be filled\n wxt_obs = {}\n for met_var in met_vars:\n wxt_obs[met_var] = np.empty(len(time))\n wxt_obs[met_var][:] = np.nan\n wxt_obs['time'] = time\n\n # find data region and create an average if appropriate\n print_step = range(1000,20000, 1000)\n for t, time_t in enumerate(time):\n\n if t in print_step:\n print 't ='+str(t)\n\n # time t-1 (start of original time period, as all data is relevent for time ENDING at time_t)\n tm1 = t-1\n time_tm1 = time_t - dt.timedelta(minutes=60)\n\n # # start of time period\n # idx_extent = 8000\n # s_idx = int(eu.binary_search(wxt_obs_raw['time'], time_tm1, lo=max(0, tm1 - idx_extent),\n # hi=min(tm1 + idx_extent, len(wxt_obs_raw['time']))))\n # # end of time period\n # e_idx = int(eu.binary_search(wxt_obs_raw['time'], time_t, lo=max(0, t - idx_extent),\n # hi=min(t + idx_extent, len(wxt_obs_raw['time']))))\n\n s_idx = int(eu.binary_search(wxt_obs_raw['time'], time_tm1))\n # end of time period\n e_idx = int(eu.binary_search(wxt_obs_raw['time'], time_t))\n\n # if the time_range time and data['time'] found in this iteration are within an acceptable range (15 mins)\n tm1_diff = time_tm1 - wxt_obs_raw['time'][s_idx]\n t_diff = time_t - wxt_obs_raw['time'][e_idx]\n\n\n # _, s_idx, tm1_diff = eu.nearest(wxt_obs_raw['time'], time_tm1)\n # _, e_idx, t_diff = eu.nearest(wxt_obs_raw['time'], time_t)\n\n\n if (tm1_diff.total_seconds() <= 15 * 60) & (t_diff.total_seconds() <= 15 * 60):\n for met_var in met_vars:\n wxt_obs[met_var][t] = np.nanmean(wxt_obs_raw[met_var][s_idx:e_idx+1])\n\n\n # create RH_frac using RH data\n 
wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n return wxt_obs", "def test_radar_request_site_historic_sweep_vol_v_hdf5_timerange(default_settings):\n\n timestamp = dt.datetime.utcnow() - dt.timedelta(days=1)\n\n request = DwdRadarValues(\n parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,\n start_date=timestamp,\n end_date=dt.timedelta(hours=0.5),\n site=DwdRadarSite.BOO,\n fmt=DwdRadarDataFormat.HDF5,\n subset=DwdRadarDataSubset.SIMPLE,\n settings=default_settings,\n )\n\n # Verify number of elements.\n results = list(request.query())\n\n if len(results) == 0:\n raise pytest.skip(\"Data currently not available\")\n\n assert len(results) == IsInt(ge=51, le=60)\n\n hdf = h5py.File(results[0].data, \"r\")\n\n assert hdf[\"/how\"].attrs.get(\"scan_count\") == 10\n assert hdf[\"/dataset1/how\"].attrs.get(\"scan_index\") == 1\n\n timestamp = round_minutes(request.start_date, 5)\n assert hdf[\"/what\"].attrs.get(\"date\") == bytes(timestamp.strftime(\"%Y%m%d\"), encoding=\"ascii\")\n assert hdf[\"/what\"].attrs.get(\"time\").startswith(bytes(timestamp.strftime(\"%H%M\"), encoding=\"ascii\"))" ]
[ "0.61954635", "0.5543161", "0.5523801", "0.5363741", "0.5354672", "0.53035545", "0.5271555", "0.52168626", "0.5188428", "0.51519525", "0.5116143", "0.51147294", "0.50999475", "0.5096456", "0.5094596", "0.50637907", "0.49918783", "0.49796364", "0.49706373", "0.49468142", "0.49386546", "0.49321926", "0.4927503", "0.4917642", "0.49071816", "0.4905046", "0.4900048", "0.489509", "0.48910025", "0.48888424" ]
0.5730075
1
Test that the queue module was properly imported
def test_queue_class_exists(): assert Queue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init():\n from parenthetics import Queue\n my_queue = Queue()\n assert isinstance(my_queue, Queue)", "def test_queue_not_installed(): # pragma: windows\n nt.assert_equal(IPCComm.get_queue(), None)", "def test_qm_project_python_testing_imported():\n assert \"qm_project_python_testing\" in sys.modules", "def test_ipc_queues():\n IPCComm.ipc_queues()", "def test_queue():\n mq = IPCComm.get_queue()\n key = str(mq.key)\n assert(CommBase.is_registered('IPCComm', key))\n CommBase.unregister_comm('IPCComm', key, dont_close=True)\n nt.assert_raises(KeyError, IPCComm.remove_queue, mq)\n CommBase.register_comm('IPCComm', key, mq)\n IPCComm.remove_queue(mq)\n assert(not CommBase.is_registered('IPCComm', key))", "def run_test_module(queue, test_module):\n\n import unittest\n # Import the module\n m = importlib.import_module('.'+test_module, 'test')\n\n\n # initialize the test suite\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n\n # add tests to the test suite\n suite.addTests(loader.loadTestsFromModule(m))\n\n # initialize a runner, pass it your suite and run it\n runner = unittest.TextTestRunner(verbosity=3)\n result = runner.run(suite)\n\n# return(result.testsRun, len(result.failures), len(result.errors))\n queue.put((result.testsRun, len(result.failures), len(result.errors)))", "def test_can_instantiate_empty_queue(empty_queue):\n assert isinstance(empty_queue, Queue)", "def test_queue_worker_needs_a_queue(self):\n with pytest.raises(ValueError):\n MinimalQueueWorker(None)", "def test_parrot_imported():\n assert \"parrot\" in sys.modules", "def test_dequeue():\n from parenthetics import Queue\n q = Queue()\n q.enqueue(0)\n assert q.dequeue() == 0", "def test_MockQueue():\n bar = tqdm()\n\n q = u.MockQueue(bar)\n q.put(100)\n assert q.bar.n == 100", "def test_module(self):\n pass", "def test_ufedmm_imported():\n assert \"ufedmm\" in sys.modules", "def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)", "def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)", "def test_priority_que_init():\n from src.priorityq import PriorityQueue\n with pytest.raises(ValueError):\n new_pqueue = PriorityQueue(1)", "def testQueueSend(self):\n self.mgr.queueMsg(37)\n self.assertTrue( self.mgr.msgQueue.empty() )\n self.v.send_mavlink.assert_called_with(37)", "def test_rlmm_imported():\n assert \"rlmm\" in sys.modules", "def test_size_of_new_queue():\n queue = Queue()\n assert queue.size() == 0", "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)", "def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "def test_pull_empty_queue(self) -> None:\n\n self.plugin.pull()\n\n self.assertEqual(\n len(self.plugin.queue),\n 0\n )", "def run_tests(queue_implementation):\n queue = queue_implementation()\n assert queue.is_empty()\n queue.enqueue(1)\n queue.enqueue(2)\n assert queue.dequeue() == 1\n queue.enqueue(3)\n queue.enqueue(4)\n assert queue.dequeue() == 2\n assert queue.dequeue() == 3\n assert 
queue.dequeue() == 4\n assert queue.is_empty()", "def setUpClass(cls):\n cls.queue = RabbitQueue(QUEUE_CONN_PARAMS)", "def test_publish1(self):\n publish = self.queue.publish(TEST_QUEUE, 'this is a test msg')\n assert publish", "def test_imports():\n assert False", "def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)", "def test_ipcrm_queues_not_isntalled(): # pragma: windows\n IPCComm.ipcrm_queues()" ]
[ "0.74117076", "0.68037856", "0.66626453", "0.6641566", "0.65427667", "0.6481025", "0.64770365", "0.6446653", "0.64384985", "0.6342957", "0.63057685", "0.6301307", "0.6292537", "0.62752134", "0.6245541", "0.61794436", "0.61595434", "0.61325556", "0.61323863", "0.61238384", "0.6097019", "0.6084545", "0.6084495", "0.60747015", "0.60669726", "0.6066834", "0.60569125", "0.6048075", "0.60369337", "0.5989826" ]
0.804297
0
Test that the queue's length increases after enqueuing a new value
def test_insertion_of_value_increases_length(empty_queue): assert len(empty_queue) == 0 empty_queue.enqueue(100) assert len(empty_queue) == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_size_increments_with_enqueue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1", "def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3", "def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)", "def test_size_decrements_with_dequeue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1\n queue.dequeue()\n assert queue.size() == 0", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def enQueue(self, value):\r\n if (len(self.queue) >= self.maxlen):\r\n return False\r\n else:\r\n self.queue.append(value)\r\n return True", "def test_size_of_new_queue():\n queue = Queue()\n assert queue.size() == 0", "def test_size_stays_same_with_peak():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1\n queue.peak()\n assert queue.size() == 1", "def enQueue(self, value: int) -> bool:\n if self.count == self.capacity:\n return False\n self.queue[(self.headIndex + self.count) % self.capacity] = value\n self.count += 1\n return True", "def enQueue(self, value):\n if self.rear - self.front < self.size:\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def enQueue(self, value):\n if self.count == self.capacity:\n return False\n self.queue[(self.headIndex + self.count) % self.capacity] = value\n self.count += 1\n return True", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n return True\n else:\n return False", "def test_enquque_val():\n queue = Queue()\n anode = queue.enqueue('a')\n assert queue._queue.first_node is anode", "def enQueue(self, value):\n \n if not self.isFull():\n if self.start == -1:\n self.start = 0\n self.end = (self.end + 1) % self.max_length\n self.queue[self.end] = value\n return True\n else:\n return False", "def test_enquque_enqueue_check_last_node_val():\n queue = Queue()\n anode = queue.enqueue('a')\n bnode = queue.enqueue('b')\n assert queue._queue.last_node is bnode", "def test_size_empty(the_queue):\n assert the_queue.size() == 0", "def enQueue(self, value: int) -> bool:\n # automatically acquire the lock when entering the block\n with self.queueLock:\n if self.count == self.capacity:\n return False\n self.queue[(self.headIndex + self.count) % self.capacity] = value\n self.count += 1\n # automatically release the lock when leaving the block\n return True", "def test_append_increases_size_of_list(val, filled_deque):\n filled_deque.append(val)\n assert filled_deque._container.head.val == val", "def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)", "def test_put_element(self):\n queue = Queue_()\n queue.put(1)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), 1)\n self.assertEqual(queue.top(), 1)", "def test_length_of_deque_when_using_append(val, result, filled_deque):\n filled_deque.append(val)\n assert filled_deque.size() == len(result)", "def 
test_the_queue_dequeue_multi_values_phase_one(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n the_queue.dequeue()\n assert the_queue._new_dll.tail.data == 3", "def enQueue(self, value: int) -> bool:\n if self.isFull():\n return False\n else:\n self.rear = (self.rear + 1) % self.maxlen\n self.circular[self.rear] = value\n self.size += 1\n return True", "def test_OpenCloseOneHundred(self):\n\n q = Queue(self.path)\n for i in range(1000):\n q.put('var%d' % i)\n del q\n q = Queue(self.path)\n self.assertEqual(1000, q.qsize())\n for i in range(1000):\n data = q.get()\n self.assertEqual('var%d' % i, data)\n q.task_done()\n with self.assertRaises(Empty):\n q.get_nowait()\n # assert adding another one still works\n q.put('foobar')\n data = q.get()", "def enQueue(self, value):\n if self.isFull():\n return False\n self.__buffer[(self.__start+self.__size) % len(self.__buffer)] = value\n self.__size += 1\n return True", "def test_the_queue_enqueue_multi_values(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n assert (the_queue._new_dll.head.data,\n the_queue._new_dll.tail.data,\n the_queue._new_dll.head.prior_node.data,\n the_queue._new_dll.tail.next_node.data) == (5, 2, 4, 3)", "def test_the_queue_dequeue(the_queue):\n the_queue.enqueue(2)\n assert the_queue.dequeue() == 2", "def validate(self, queue):\n self.assertTrue(queue.is_empty())\n queue.enqueue(10)\n self.assertFalse(queue.is_empty())\n queue.enqueue(20)\n self.assertEqual(10, queue.dequeue())\n self.assertFalse(queue.is_empty())\n queue.enqueue(30)\n queue.enqueue(40)\n self.assertEqual(20, queue.dequeue())\n self.assertEqual(30, queue.dequeue())\n self.assertEqual(40, queue.dequeue())" ]
[ "0.8218401", "0.8080756", "0.7737941", "0.76956904", "0.7611223", "0.7446187", "0.73378056", "0.7295381", "0.7244632", "0.72361594", "0.710653", "0.70698786", "0.7049939", "0.70439875", "0.704351", "0.70268536", "0.6978875", "0.6974477", "0.69701475", "0.6932488", "0.6918494", "0.6918291", "0.6893175", "0.6880635", "0.6870602", "0.6851297", "0.6815437", "0.68118584", "0.6804115", "0.6783972" ]
0.8624658
0
Test that the front property is None prior to enqueuing
def test_default_value_of_front(empty_queue): assert empty_queue.front is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def i_am_at_the_front(self):\n return self is self.queue[0]", "def empty_queue(queue):\n return queue.front is None", "def test_enquque_enqueue_check_first_node_val():\n queue = Queue()\n anode = queue.enqueue('a')\n bnode = queue.enqueue('b')\n assert queue._queue.first_node is anode", "def i_am_at_the_front(self):\n return self is self.hist._queue[0]", "def empty(self):\r\n return self.point_to_head.chi == None\r\n\r\n\r\n\r\n # Your MyQueue object will be instantiated and called as such:\r\n # obj = MyQueue()\r\n # obj.push(x)\r\n # param_2 = obj.pop()\r\n # param_3 = obj.peek()\r\n # param_4 = obj.empty()", "def test_cant_peekleft_empty(empty_deque):\n assert empty_deque.peekleft() is None", "def test_initialization_with_empty_list_first_node_check():\n queue = Queue([])\n assert queue._queue.first_node is None", "def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)", "def test_new_deque_exists_with_no_head(new_empty_deque):\n assert new_empty_deque._container.head is None", "def test_cant_peek_empty(empty_deque):\n assert empty_deque.peek() is None", "def test_peekleft_return_none_when_empty(new_empty_deque):\n assert new_empty_deque.peekleft() is None", "def is_empty(self):\n if self.front == None:\n return True\n else:\n return False", "def test_peek(self):\n queue = Queue()\n self.assertEqual(queue.peek(), None)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.peek(), 1)\n self.assertEqual(queue.size(), 3)", "def is_empty(self):\n if self.front:\n return False\n return True", "def test_peek_left_empty_list():\n from deque import Deque\n dq = Deque()\n assert dq.peek_left() is None", "def test_enquque_val():\n queue = Queue()\n anode = queue.enqueue('a')\n assert queue._queue.first_node is anode", "def noqueue(self) -> bool:\n return not self.orders", "def is_empty(self):\n\n if self.front == None:\n return True\n else:\n return False", "def test_enquque_enqueue_check_last_node_val():\n queue = Queue()\n anode = queue.enqueue('a')\n bnode = queue.enqueue('b')\n assert queue._queue.last_node is bnode", "def __isTileInFrontier(self, tile):\n return self.frontier.isQueueContainsElement(tile)", "def test_peek_return_none_when_empty(new_empty_deque):\n assert new_empty_deque.peek() is None", "def test_the_peek(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.dequeue()\n assert the_queue._new_dll.tail.data == 2", "def test_initialization_with_empty_list_last_node_check():\n queue = Queue([])\n assert queue._queue.last_node is None", "def is_empty(self):\n return not bool(self.front)", "def test_full_deque_is_full(full_deque):\n assert full_deque._deque.head.value == 3", "def test_peek_empty_list():\n from deque import Deque\n dq = Deque()\n assert dq.peek() is None", "def validate(self, queue):\n self.assertTrue(queue.is_empty())\n queue.enqueue(10)\n self.assertFalse(queue.is_empty())\n queue.enqueue(20)\n self.assertEqual(10, queue.dequeue())\n self.assertFalse(queue.is_empty())\n queue.enqueue(30)\n queue.enqueue(40)\n self.assertEqual(20, queue.dequeue())\n self.assertEqual(30, queue.dequeue())\n self.assertEqual(40, queue.dequeue())", "def any(self) -> bool:\n return len(self.queue) > 0", "def __nonzero__(self):\n if self._pushed:\n return True\n try:\n self.push(self.next())\n except StopIteration:\n return False\n return True", "def isEmpty(self):\n return self.front is None" ]
[ "0.7659449", "0.7099113", "0.6992859", "0.6959401", "0.69548464", "0.6852271", "0.67845005", "0.67837906", "0.6695713", "0.6672456", "0.6582717", "0.6582628", "0.657398", "0.65670973", "0.656031", "0.6504475", "0.64835936", "0.6478431", "0.6462574", "0.64194036", "0.64177316", "0.64043564", "0.63869786", "0.63841796", "0.6348708", "0.6337927", "0.63148147", "0.6311283", "0.6306825", "0.63044465" ]
0.7998886
0
Append service to the service name
def append_service_to_name(self, data, **kwargs): data['name'] = f'{data.get("name").upper()}_SERVICE' return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")", "def service_name(self):\n return self._service_name", "def getServiceName(self) -> str:\n ...", "def get_full_service_name(service, s):\n service_alias = SERVICE_MAPPING[service] if service in SERVICE_MAPPING else service\n\n # Don't suffix service type if it's a web worker or the same as the service (e.g. gateway, haproxy)\n if s == 'web' or service_alias == s:\n return service_alias\n return '{}-{}'.format(service_alias, s)", "def _form_service_key(self, service_name, service_addr):\n return '/'.join((service_name, service_addr))", "def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")", "def set_service_name(name):\n emit(UPDATE_SERVICE_SIGNAL, BREADCRUMB_SENDER, name=name)", "def _remember_service_name(self, event):\n service_name = event[\"arguments\"][\"service_name\"]\n # We've added logging of the service_handle to the API signature in\n # the Monitor, but for backwards compatibility we'll keep it as\n # follows for now.\n service_handle = \"0x%08x\" % event[\"return_value\"]\n self.services[service_handle] = service_name", "def add_service(self, service):\n self.app.add_service(service)", "def _k8s_service_name(self):\n return \"{}-ssh-service\".format(self.app.name)", "def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")", "def SERVICE_NAME(default=None):\n return ParamStore.get('SERVICE_NAME', default=default, store=ParamStore.Stores.OS).value", "def get_service_name(wrapped, instance, args, kwargs):\n if \"serviceAbbreviation\" not in instance._service_model.metadata:\n return instance._service_model.metadata[\"endpointPrefix\"]\n return instance._service_model.metadata[\"serviceAbbreviation\"]", "def screename(service=''):\r\n\r\n def _make_name(a, b):\r\n return ''.join(_random.sample(string.ascii_letters,\r\n _random.choice(range(a, b))))\r\n\r\n if service in ('', 'aim', 'aol'):\r\n name = _make_name(3, 16)\r\n if service == 'aol':\r\n return name + '@aol.com'\r\n else:\r\n return name\r\n elif service is 'skype':\r\n name = _make_name(6, 32)\r\n return name\r\n elif service is 'google':\r\n name = _make_name(1, 19)\r\n return name + '@google.com'\r\n else:\r\n name = _make_name(8, 20)\r\n return name", "def service(self) -> str:\n return pulumi.get(self, \"service\")", "def register_service(self, service: str, cb: Callable, **kwargs: Optional[Any]) -> None:\n self._check_service(service)\n d, s = service.split(\"/\")\n self.logger.debug(\"register_service: %s/%s, %s\", d, s, kwargs)\n\n namespace = self._get_namespace(**kwargs)\n\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n kwargs[\"__name\"] = self.name\n\n self.AD.services.register_service(namespace, d, s, cb, 
__async=\"auto\", **kwargs)", "def register_service(service, iface, name):", "def addService(self, service):\n\t\tself.services.append(service)\n\t\treturn self", "def build_service_url(cluster_name, service_name) -> str:\n return \"https://us-west-2.console.aws.amazon.com/ecs/home?region=us-west-2#/clusters/{0}/services/{1}/details\".format(cluster_name, service_name)", "def service(self, service):\n \n self._service = service", "def get_service_name(service, rem):\n flavor = rem.os.package_type\n try:\n return _SERVICE_MAP[service][flavor]\n except KeyError:\n return None", "def name(self):\n return \"systemd Service\"" ]
[ "0.72060347", "0.72060347", "0.71748763", "0.7068025", "0.7060443", "0.70234215", "0.69514245", "0.69514245", "0.69514245", "0.69364977", "0.69364977", "0.69364977", "0.6925165", "0.67653453", "0.6744948", "0.6667049", "0.6590735", "0.6590735", "0.6590735", "0.65813726", "0.65705985", "0.65089905", "0.65025777", "0.65013903", "0.6382989", "0.63072455", "0.63054943", "0.6260715", "0.62306404", "0.62222695" ]
0.783467
0
Copies the given file or folder to the target folder, whereby the file/folder name is constructed by appending the current date (and possibly time) to the file/folder name. If other copies already exist in the given target folder, the old ones are deleted so that at most num_copies copies are kept. The command creates a new copy only if at least min_period_days have elapsed since the last existing copy or there are no existing copies in the target folder. A new backup is always created if min_period_days is 0.
def multicopy(src_file_or_dir, target_dir, num_copies, min_period_days=0, target_base_name=None, append_time=True, ignore_errors=False, reporter=None): def log_info(message): if reporter: reporter.info(message) else: print(message) #print(src_file_or_dir, target_dir, target_base_name, min_period_days, num_copies) #parse source file/folder name, detect mode ("file" or "folder") MODE_FILE = "file" MODE_DIR = "dir" if os.path.isfile(src_file_or_dir): mode = MODE_FILE src_file_or_dir_name, src_file_extension = os.path.splitext(os.path.basename(src_file_or_dir)) elif os.path.isdir(src_file_or_dir): mode = MODE_DIR src_file_or_dir_name, src_file_extension = os.path.basename(src_file_or_dir), "" else: raise Exception("Source path '{0}' does not reference an existing file or directory!" .format(src_file_or_dir)) #check target dir if not os.path.isdir(target_dir): raise Exception("Target directory '" + target_dir + "' does not exist or is not a directory!") #print(src_file_or_dir_name, src_file_extension) #get current date/time current_date = date.today() current_date_time = datetime.now() #calc target base name if target_base_name is None: target_base_name = src_file_or_dir_name; #construct new file/folder name date_time_str = current_date_time.strftime("%Y-%m-%d") if append_time: date_time_str = date_time_str + "_" + current_date_time.strftime("%H-%M") new_file_or_dir_name = target_base_name + "_" + date_time_str + src_file_extension new_file_or_dir_path = os.path.join(target_dir, new_file_or_dir_name) #print(new_file_or_dir_name, new_file_or_dir_path) #get the list of all existing backup files or folders #sort existing backups in date-reverse order (newer files/folders first) existing_backups = glob.glob(os.path.join(target_dir, target_base_name + "_*" + src_file_extension)) existing_backups.sort() existing_backups.reverse() #print(existing_backups) #get the last existing backup (if any), parse date (sets min_period_days = 0 if parse error or no existing backup) if len(existing_backups) > 0: if mode == MODE_FILE: last_backup_name = os.path.splitext(os.path.basename(existing_backups[0]))[0] elif os.path.isdir(src_file_or_dir): last_backup_name = os.path.basename(existing_backups[0]) last_backup_date_string = last_backup_name[-10:] try: last_backup_date = date(*strptime(last_backup_date_string, "%Y_%m_%d")[0:3]) except ValueError: #invalid format => set to today, but period to 0 (to force backup) last_backup_date = current_date min_period_days = 0 else: last_backup_date = current_date min_period_days = 0 #print last_backup_date_string, last_backup_date, min_period_days, (current_date - last_backup_date).days #back up the file or folder, if needed if min_period_days == 0 or (current_date - last_backup_date).days >= min_period_days: if mode == MODE_FILE: log_info("Copying '{0}' to '{1}'...".format(src_file_or_dir, new_file_or_dir_path)) if os.path.isfile(new_file_or_dir_path): os.remove(new_file_or_dir_path) try: shutil.copyfile(src_file_or_dir, new_file_or_dir_path) except IOError: if ignore_errors: log_info("\n\nFollowing file could not be copied: '{0}'.".format(src_file_or_dir)) else: raise else: log_info("Copying source folder to '" + new_file_or_dir_path + "'...") if os.path.isdir(new_file_or_dir_path): shutil.rmtree(new_file_or_dir_path) try: shutil.copytree(src_file_or_dir, new_file_or_dir_path) except shutil.Error as err : non_copied_files = err.args[0] if ignore_errors: log_info ("\n\nFollowing", len(non_copied_files), "files could not be copied:") for non_copied_file in non_copied_files: 
non_copied_file_src = non_copied_file[0] log_info(" " + non_copied_file_src) else: raise log_info("Done") else: log_info("Skiping backup because the last existing backup is new enough.") #cleaning up existing backups log_info("Cleaning up existing copies...") #again get the list of all existing backup files or folders (now includes the new backup) #sort existing backups in date-reverse order (newer files/folders first) existing_backups = glob.glob(os.path.join(target_dir, target_base_name + "_*" + src_file_extension)) existing_backups.sort() existing_backups.reverse() #print(existing_backups) #delete out-of-date files/folders (all starting at num_copies) for existingBackup in existing_backups[num_copies:]: if mode == MODE_FILE: os.remove(existingBackup) else: shutil.rmtree(existingBackup) #Cleaning up done log_info("Done")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cp(src, dst):\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n src_stat = os.stat(src)\n try:\n dst_stat = os.stat(dst)\n except FileNotFoundError:\n dst_stat = (0,)*10\n src_modif_time = src_stat[stat.ST_MTIME]\n dst_modif_time = dst_stat[stat.ST_MTIME]\n if src_modif_time > dst_modif_time:\n shutil.copyfile(src, dst)\n print(\" ++\", dst[len(THESIS_DIR):])\n else:\n print(\" --\", dst[len(THESIS_DIR):])", "def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))", "def create_backup_file(*args):\n\n for file in list(*args):\n try:\n date_pattern = BDate.date_pattern()\n BDate.copy_file(file, f'{file}.{date_pattern}')\n except FileNotFoundError as err:\n print(f'[ERROR] {err.filename} : No such file or directory')", "def get_target_files_patiently(initial_image_dir, run_number, target_dir, wait_period_sec=30.0, interval_period_sec=5.0, for_main_image_files=True):\n\tstable_files_count_time = 0.0\n\tsource_files = get_files_to_copy(initial_image_dir, run_number)\n\tfile_count = len(source_files)\n\ttime_val = time.time()\n\tprint('\\nWaiting for stable file count. Current number of files to copy:{} stable file count time: {} (sec)\\n'.format(file_count, stable_files_count_time))\n\twhile stable_files_count_time < wait_period_sec:\n\t\ttime.sleep(interval_period_sec)\n\t\tif for_main_image_files:\n\t\t\tsource_files_now = get_files_to_copy(initial_image_dir, run_number)\n\t\telse:\n\t\t\tsource_files_now = get_tpx3_files_to_copy(initial_image_dir)\n\t\tfile_count_now = len(source_files_now)\n\t\ttime_val_now = time.time()\n\t\tif file_count_now == file_count:\n\t\t\tstable_files_count_time += (time_val_now - time_val)\n\t\telse:\n\t\t\tstable_files_count_time = 0.0\n\t\ttime_val = time_val_now\n\t\tsource_files = source_files_now\n\t\tfile_count = file_count_now\n\t\tprint('\\nWaiting for stable file count. 
Current number of files to copy:{} stable file count time: {} (sec)\\n'.format(file_count, stable_files_count_time))\n\n\ttarget_files = []\n\tfor source_file in source_files:\n\t\thead_tail = os.path.split(source_file)\n\t\ttarget_files.append(os.path.join(target_dir, head_tail[1]))\n\treturn target_files", "def _archiveDataByDate(self, src, dest):\n root = os.getcwd()\n srcPath = join(root,src)\n destPath = join(root,dest)\n f = [] #Array with list of files in directory\n fDate = [] #Array with list of files with certain date;\n s = [] #Array with list of files successfully copied\n for (dirpath, dirnames, filenames) in walk(srcPath):\n f.extend(filenames)\n if len(f) > 0:\n for i in f:\n match = re.search(r'\\d{4}-\\d{2}-\\d{2}', i)\n if str(i) != 'archiving_log.txt' and str(i) != 'archiving_log.txt~' and str(i) != 'archivingScript.py' and match.group() == self.date:\n try:\n buffer_size = int(20000)\n fileSrcPath = join(dirpath, i)\n fileDestPath = join(destPath, i)\n with open(fileSrcPath, 'rb') as fsrc:\n with open(fileDestPath, 'wb') as fdest:\n copy = shutil.copyfileobj(fsrc,fdest,buffer_size)\n copy\n self._backupLog('Copy Operation File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) +'\\n') #+ '\\t'+ 'Path: '+ str(srcPath)\n s.append(i)\n except shutil.Error as e:\n self._backupLog('Error: %s' % e + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except IOError as e:\n self._backupLog('Error: %s' % e.strerror + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n if len(s) >0:\n for (dirpath,dirnames,filenames) in walk(srcPath):\n for cfile in f:\n for sfile in s:\n if cfile == sfile:\n try:\n filetoDelete = join(srcPath, cfile)\n os.remove(filetoDelete)\n self._backupLog('Delete Operation File: '+str(cfile)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except OSError, e:\n self._backupLog('Error deleting file: %s - %s.' % (e.filename, e.strerror) + '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')", "def copy(src_file_name, target_folder, file_name):\n\n global CURRENT_PROGRESS\n global TOTAL_FILE_NUM\n CURRENT_PROGRESS += 1\n\n mkdir(target_folder)\n target_file = os.path.join(target_folder, file_name)\n\n if os.path.exists(target_file):\n src_md5 = md5(src_file_name)\n\n # iterate target folder to check if file already exists\n for file_in_target in os.listdir(target_folder):\n entry = os.path.join(target_folder, file_in_target)\n target_md5 = md5(entry)\n if src_md5 == target_md5:\n log(\"(\" + str(CURRENT_PROGRESS) + \"/\" + str(TOTAL_FILE_NUM) + file_name + \"file exists, ignore COPY. <-- \" + src_file_name)\n return\n\n word_list = file_name.split('.')\n num_of_files = len(\n [f for f in os.listdir(target_folder) \\\n if os.path.isfile(os.path.join(target_folder, f))])\n file_name = word_list[0] + '(' + str(num_of_files) + ').' 
+ word_list[1]\n target_file = os.path.join(target_folder, file_name)\n\n if DELETE_AFTER_COPY:\n log(\"(\" + str(CURRENT_PROGRESS) + '/' + str(TOTAL_FILE_NUM) + \")MOVE: \" + src_file_name + \" --->\" + target_file)\n shutil.move(src_file_name, target_file)\n else:\n log(\"(\" + str(CURRENT_PROGRESS) + '/' + str(TOTAL_FILE_NUM) + \")COPY: \" + src_file_name + \" --->\" + target_file)\n shutil.copy(src_file_name, target_file)", "def make_wb_copy():\r\n shutil.copy(full_target_file_name, path_name + copied_file_name) # copy the file\r", "def create_path_by_date(dest, dt):\n if not os.path.isdir(dest):\n raise FileNotFoundError(f\"dest {dest} must be valid path\")\n yyyy, mm, dd = dt[0:3]\n yyyy = str(yyyy).zfill(4)\n mm = str(mm).zfill(2)\n dd = str(dd).zfill(2)\n new_dest = os.path.join(dest, yyyy, mm, dd)\n if not os.path.isdir(new_dest):\n os.makedirs(new_dest)\n return new_dest", "def runbackup_freqs_single(backupfunc, rootfolder, newdate, maxbackups):\n\n # rootfolder should be something like regbackup/M5/\n if not os.path.isdir(rootfolder):\n os.makedirs(rootfolder)\n\n dirs = os.listdir(rootfolder)\n dirs = sorted(dirs)\n\n # verify all zips have same length as latestdir\n lengths = [len(folder) for folder in dirs]\n if len(set(lengths)) > 1:\n raise ValueError('More than one length of dirs in folder. Lengths: ' + str(lengths) + '. Folder: ' + str(rootfolder) + '.')\n if len(lengths) == 1 and len(dirs[0]) != len(newdate):\n raise ValueError('newdate does not match dates in the existing folder. Current date format: ' + str(dirs[0]) + '. New date: ' + str(newdate) + '.')\n\n if newdate in dirs:\n # newdate already in dirs so backup already exists - can stop\n return(None)\n\n if len(dirs) >= maxbackups:\n shutil.move(rootfolder / Path(dirs[0]), rootfolder / Path(newdate))\n\n backupfunc(rootfolder / Path(newdate))", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def fix_file_dates(source_file_name, dest_file_name):\n shutil.copystat(source_file_name, dest_file_name)\n print(\"Fixed dates for \" + dest_file_name)", "def copyseries(self, suuid, targetdir):\n seriesfiles = self.controller.db.getFiles(suuid)\n dest = False\n if seriesfiles is not None and len(seriesfiles) > 0:\n dest = join(targetdir, suuid)\n if not exists(dest):\n mkdir(dest)\n for f in seriesfiles:\n if not exists(join(dest, basename(f))):\n shutil.copy(f, dest)\n return dest", "def conditional_copy(copy_tuples):\n for source_file, destination_file in copy_tuples:\n # If the root_directory and destination file contents are the same, don't perform unnecessary file I/O\n if not destination_file.exists() or not filecmp.cmp(source_file, destination_file, shallow=False):\n destination_file.parent.mkdir(parents=True, exist_ok=True)\n shutil.copyfile(source_file, destination_file)", "def send_file(filepath, dst_folder_name, copy=True, date=True):\n path = Path(filepath)\n dst_folder = path.parent.joinpath(dst_folder_name)\n if not dst_folder.exists():\n dst_folder.mkdir(parents=True)\n \n if date:\n today = datetime.today().strftime('%Y-%h-%d-%H-%M')\n save_path = dst_folder.joinpath(f'{path.stem}-{today}{path.suffix}')\n else:\n save_path = dst_folder.joinpath(f'{path.name}')\n \n if copy:\n shutil.copyfile(path, 
save_path)\n else:\n shutil.move(path, save_path)", "def runbackup_freqs(backupfunc, rootbackupfolder, freqs):\n\n # first create root backup path\n if not os.path.isdir(rootbackupfolder):\n os.makedirs(rootbackupfolder)\n\n # this is the default maxbackup\n maxbackupsdict = {'M5': 12, 'H1': 24, 'd1': 12, 'd10': 10, 'm1': 13}\n\n if freqs is None:\n raise ValueError('Need to specify frequencies with which to do the backups.')\n \n # freqs is either M5 or M5_10 meaning back up every 5 mins with a max of 10 backups\n # go through here and get maxbackups as a function of \n freqsonly = []\n for freq in freqs:\n freqsplit = freq.split('_')\n freqsonly.append(freqsplit[0])\n if len(freqsplit) > 1:\n if len(freqsplit) > 2:\n raise ValueError('Weird definition of frequency of backup: ' + str(freq) + '.')\n try:\n # adjust default maxbackups\n maxbackupsdict[freqsplit[0]] = int(freqsplit[1])\n except Exception:\n raise ValueError('Weird definition of frequency of backup: ' + str(freq) + '.')\n\n # verify no weird terms included in freqs\n badterms = set(freqsonly) - {'M5', 'H1', 'd1', 'd10', 'm1'}\n if len(badterms) > 0:\n raise ValueError('Following bad terms in freqs: ' + str(badterms) + '.')\n\n # get relevant date base folder names:{{{\n now = datetime.datetime.now()\n \n H1_strf = now.strftime(\"%Y%m%d_%H\")\n d1_strf = now.strftime(\"%Y%m%d\")\n m1_strf = now.strftime(\"%Y%m\")\n\n adjusted = datetime.datetime(now.year, now.month, now.day, now.hour, (now.minute // 5) * 5)\n M5_strf = adjusted.strftime(\"%Y%m%d_%H%M\")\n\n if now.day <=10:\n adjustedday = 1\n elif now.day <=20:\n adjustedday = 11\n else:\n adjustedday = 21\n adjusted = datetime.datetime(now.year, now.month, adjustedday)\n d10_strf = adjusted.strftime(\"%Y%m%d\")\n # get relevant date base folder names:}}}\n\n if 'M5' in freqsonly:\n runbackup_freqs_single(backupfunc, rootbackupfolder / Path('M5'), M5_strf, maxbackupsdict['M5'])\n if 'H1' in freqsonly:\n runbackup_freqs_single(backupfunc, rootbackupfolder / Path('H1'), H1_strf, maxbackupsdict['H1'])\n if 'd1' in freqsonly:\n runbackup_freqs_single(backupfunc, rootbackupfolder / Path('d1'), d1_strf, maxbackupsdict['d1'])\n if 'd10' in freqsonly:\n runbackup_freqs_single(backupfunc, rootbackupfolder / Path('d10'), d10_strf, maxbackupsdict['d10'])\n if 'm1' in freqsonly:\n runbackup_freqs_single(backupfunc, rootbackupfolder / Path('m1'), m1_strf, maxbackupsdict['m1'])", "def copy(self,fileName,destDir,destName=None,setMTime=False):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n if not destName: destName = fileName\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,destName)\n if os.path.exists(destPath): \n os.remove(destPath)\n shutil.copyfile(srcPath,destPath)\n if setMTime:\n mtime = getmtime(srcPath)\n os.utime(destPath,(time.time(),mtime))\n self.refresh()", "def copyFile(src_file, dst_file, race_flag):\n\n if race_flag:\n time.sleep( random.randint(5, random.randint(10, 50)) )\n if os.path.exists(dst_file):\n while not sameSize(src_file, dst_file):\n time.sleep(10)\n return None\n\n while(True):\n shutil.copy(src_file, dst_file)\n if sameSize(src_file, dst_file):\n break", "def retryFileCopy(self):\n self.areCopiesValid = self.checkCopiedFiles()\n copyRetryCount = 5\n while(copyRetryCount > 1 and not self.areCopiesValid):\n print(\"Something failed in copy, retrying \" + str(copyRetryCount))\n self.firstTimeSetup()\n self.areCopiesValid = self.checkCopiedFiles()\n copyRetryCount -= 1", "def copy_file(source_file, target_file):\n\t# 
print('\\n\\nCopying [{}] to [{}].\\n\\n'.format(source_file, target_file))\n\trun_rsync([source_file, target_file])", "def copy_supported_files(self):\n\n try:\n for directory in self.config.DIRS_TO_COPY:\n shutil.copytree(self.dir_helper.publication_path + directory,\n self.temp_dir + directory)\n except Exception, ex:\n print '[e] exception {}'.format(str(ex))\n print '[i] looks like to folder existing that are scheduled for copying'\n\n for file_ in self.config.FILES_TO_COPY:\n index = file_.rfind('\\\\')\n dest_file = file_\n if index != -1:\n dest_file = file_[index+1:]\n\n try:\n shutil.copy2(self.dir_helper.publication_path + file_,\n self.temp_dir + dest_file)\n except Exception, ex:\n print '[e] exception {}'.format(str(ex))\n print '[i] file \"{}\" was not copied'.format(self.dir_helper.publication_path + file_)", "def act_copy_file(self, file_source, file_target):\n try:\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy2(file_source, file_target)\n self.logger.debug('%s: Action: <copy> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file copy: %s -> %s', file_source, file_target)", "def backup_directory(self, source_directory, destination_directory):\n pass", "def copy_with_retry(src, dst):\n\n for _i in range(0, 5):\n try:\n if os.path.exists(dst):\n delete_with_retry(dst)\n\n shutil.copytree(src, dst)\n return\n except:\n time.sleep(0.1)\n\n print(\"Could not copy directory after 5 attempts\")\n sys.exit(1)", "def backup(target):\n\n directory = os.path.split(target)[1]\n backup_path = os.path.abspath(os.path.join(backup_dir, directory))\n print('Auto backup {} -> {}'.format(target, backup_path))\n if not os.path.exists(backup_path):\n os.mkdir(backup_path)\n\n basename = os.path.basename(backup_path)\n if ' ' in basename:\n # Eliminate the space in folder name.\n basename = '_'.join(basename.split(' '))\n\n time_str = '_'.join(list(map(str, time.localtime()[:6])))\n zip_filename = basename + '_' + time_str + '.zip'\n zip_filepath = os.path.abspath(os.path.join(backup_path, zip_filename))\n\n with zipfile.ZipFile(zip_filepath, 'w') as zip_file:\n files_path = []\n for root, _, files in os.walk(target):\n for file in files:\n files_path.append(os.path.join(root, file))\n for each in files_path:\n print('Compress file {}...'.format(each))\n zip_file.write(each,\n each[each.find(os.path.basename(backup_path)):])", "def create_file(backup_file, input_root, output_root):\n input_path = get_input_path(backup_file, input_root)\n if input_path is None:\n logging.warning(f\"Missing file: {backup_file.file_id} ({backup_file.relative_path})\")\n return 0\n output_path = get_output_path(backup_file, output_root)\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n copyfile(input_path, output_path)", "def purge_backups(self, source=None, days=None, regex=None):\n if source is None:\n source = self.default_archive_backup_dir\n if days is None:\n days = self.config['retention']\n if regex is None:\n regex = self.name_regex\n files = os.listdir(source)\n pattern = re.compile(regex)\n for entry in files:\n path = os.path.join(source, entry)\n match = pattern.match(entry)\n if match is None:\n continue\n if self.name != match.group(1):\n continue\n timestamp = datetime.datetime.strptime(match.group(2), DATE_FORMAT)\n if (timestamp < (datetime.datetime.now() - datetime.timedelta(days=days)) and\n timestamp > datetime.datetime(2018, 1, 1)):\n self.logger.debug('purging backup %s', path)\n 
try:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n except OSError as e:\n return e.errno\n return 0", "def copyChildren(root, target):\n global numCopied, exitStatus\n\n if not os.path.exists(target):\n mkdirRecursive(target)\n childList = os.listdir(root)\n for entry in childList:\n source = root + os.sep + entry\n if os.path.isfile(source) and entry.endswith(\".py\"):\n doCopy = True\n if os.path.exists(target + os.sep + entry):\n srcStat = os.stat(source)\n targetStat = os.stat(target + os.sep + entry)\n if srcStat[stat.ST_MTIME] <= targetStat[stat.ST_MTIME]:\n doCopy = False # target is same or newer\n else:\n os.remove(target + os.sep + entry)\n if doCopy:\n shutil.copy2(source, target)\n shutil.copymode(source, target + os.sep + entry)\n numCopied = numCopied + 1\n elif os.path.isdir(source):\n # make the child directory in the target tree,\n # if it doesn't already exist\n if not os.path.exists(target + os.sep + entry):\n mkdirRecursive(target + os.sep + entry)\n # and then copy all of its children\n copyChildren(root + os.sep + entry, target + \"/\" + entry)", "def daily(dbname, as_username='postgres'):\n\n filename = '{dbname}-{indate}.dump.sql'.format(\n dbname=dbname, indate=datetime.now().strftime('%Y-%m-%d'))\n backup_daily_dir = os.path.join(BACKUPS_STORE_DIR, 'daily')\n if not os.path.isdir(backup_daily_dir):\n os.makedirs(backup_daily_dir)\n\n dumpfile = execute_pgdump(dbname, as_username)\n dst = os.path.join(backup_daily_dir, filename)\n logger.info('moving {src} into {dst}'.format(src=dumpfile, dst=dst))\n shutil.move(dumpfile, dst)\n logger.info('{dst} has a size of {size} bytes.'.format(\n dst=dst, size=get_file_size(dst)))", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def do_backup_frequently(start_time: float, users_dict: dict, start_idx: int,\n num_checked_emails: int) -> float:\n if (time.time() - start_time) >= TIME_BTWN_BACKUPS:\n backup_all(users_dict, start_idx, num_checked_emails)\n start_time = time.time()\n return start_time" ]
[ "0.540258", "0.5268108", "0.49308607", "0.479153", "0.47826353", "0.475972", "0.47528076", "0.47310272", "0.469119", "0.46287873", "0.45539945", "0.45484373", "0.4509758", "0.45064256", "0.44562772", "0.4437004", "0.44316283", "0.44010857", "0.43905577", "0.43826994", "0.4366059", "0.43382648", "0.43254876", "0.4317167", "0.4288655", "0.42786953", "0.42711842", "0.42587966", "0.42524105", "0.4241527" ]
0.6508326
0
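Several of the negative snippets above implement the same idea: copy a file only when the source's modification time is newer than the destination's. A minimal standalone sketch of that pattern follows; the function name and the choice of copy2 are illustrative assumptions, not taken from any single snippet above.

import os
import shutil

def copy_if_newer(src, dst):
    # Ensure the destination directory exists before comparing timestamps.
    dst_dir = os.path.dirname(dst)
    if dst_dir:
        os.makedirs(dst_dir, exist_ok=True)
    src_mtime = os.path.getmtime(src)
    try:
        dst_mtime = os.path.getmtime(dst)
    except FileNotFoundError:
        dst_mtime = 0.0  # no destination yet, so always copy
    if src_mtime > dst_mtime:
        shutil.copy2(src, dst)  # copy2 also preserves the source's timestamps
        return True
    return False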
Make an HTTP request packet: request line, headers, message
def make_packet(message, host): RESOURCE = "/" # dummy resource # First line is the request request = HTTPConstants.GET_REQUEST + " " + RESOURCE + " " + HTTPConstants.VERSION + HTTPConstants.CRLF # Next are the headers headers = "Host: {0}".format(host) + HTTPConstants.CRLF # Construct the head head = request + headers # Construct the body body = message + HTTPConstants.CRLF # Assembly into a packet, where the head and body (message) are separated by a blank line (CRLF), and the EOM is # denoted by a blank line return head + HTTPConstants.CRLF + body + HTTPConstants.CRLF
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_request_message(request):\n url = urlparse(request.url)\n request_headers = dict(request.headers)\n if 'Host' not in request_headers:\n request_headers['Host'] = url.netloc\n return HTTPMessage(\n line='{method} {path} HTTP/1.1'.format(\n method=request.method,\n path=url.path or '/'),\n headers=NEW_LINE.join(str('%s: %s') % (name, value)\n for name, value\n in request_headers.items()),\n body=request._enc_data,\n content_type=request_headers.get('Content-Type')\n )", "def requestheaders(self, flow: mitmproxy.http.HTTPFlow):", "def build_http_pkt(line: List[bytes],\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n req = WHITESPACE.join(line) + CRLF\n if headers is not None:\n for k in headers:\n req += build_http_header(k, headers[k]) + CRLF\n req += CRLF\n if body:\n req += body\n return req", "def build_http_pkt(line: List[bytes],\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n req = WHITESPACE.join(line) + CRLF\n if headers is not None:\n for k in headers:\n req += build_http_header(k, headers[k]) + CRLF\n req += CRLF\n if body:\n req += body\n return req", "def requestheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def requestheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = b'HTTP/1.1',\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "async def _send_request_headers(self, request: Request, stream_id: int) -> None:\n end_stream = not has_body_headers(request)\n\n # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.\n # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require\n # HTTP/1.1 style headers, and map them appropriately if we end up on\n # an HTTP/2 connection.\n authority = [v for k, v in request.headers if k.lower() == b\"host\"][0]\n\n headers = [\n (b\":method\", request.method),\n (b\":authority\", authority),\n (b\":scheme\", request.url.scheme),\n (b\":path\", request.url.target),\n ] + [\n (k.lower(), v)\n for k, v in request.headers\n if k.lower()\n not in (\n b\"host\",\n b\"transfer-encoding\",\n )\n ]\n\n self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)\n self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)\n await self._write_outgoing_data(request)", "def _req(self):\n self.seq_num += 1\n req = struct.pack(HEADER_FMT, REQUEST_TYPE, self.seq_num)\n return req", "def test_build_headers(self):\n\n headers = self_signed.build_headers()\n assert 'Content-Length' in headers\n assert 'X-Amz-Date' in headers\n assert 'Host' in headers\n assert 'X-Amz-Security-Token' in headers\n assert 'Content-Type' in headers\n assert 'Authorization' in headers", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = HTTP_1_1,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def view_request_headers(line):\n args = shlex.split(line)\n if not args:\n raise PappyException(\"Request id is required\")\n reqid = args[0]\n\n reqs = yield load_reqlist(reqid)\n for req in reqs:\n if len(reqs) > 1:\n print 'Request %s:' % req.reqid\n view_full_message(req, True)\n if len(reqs) > 1:\n print '-'*30\n print ''", 
"def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)", "def winhttp_WinHttpAddRequestHeaders(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"pwszHeaders\", \"dwHeadersLength\", \"dwModifiers\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _send_headers(self, method, url, headers):\n self._sock.send(b' '.join([method, url, b'HTTP/1.1\\r\\n']))\n\n for name, value in headers.iter_raw():\n name, value = to_bytestring(name), to_bytestring(value)\n header = b''.join([name, b': ', value, b'\\r\\n'])\n self._sock.send(header)\n\n self._sock.send(b'\\r\\n')", "def gen_headers(self, http_code):\n\n if http_code == 200:\n http_headers = \"HTTP/1.1 200 OK\\n\"\n elif http_code == 400:\n http_headers = \"HTTP/1.1 400 Bad Request\\n\"\n elif http_code == 404:\n http_headers = \"HTTP/1.1 404 Not Found\\n\"\n\n utc_datetime = datetime.datetime.utcnow().strftime(\"%a, %d %b %Y %H:%M:%S\")\n http_headers += dedent(\"\"\"\\\n Date: %s GMT\n Content-type: text/html; charset=UTF-8\n Server: pydb.py\n Connection: close\\n\\n\"\"\" % utc_datetime)\n\n return http_headers", "def create_http_req(self, data, ID, tunnel_type, host_name):\n\n get = 'GET /' + data.encode(\"hex\") + ' HTTP/1.1' + const.END_LINE\n host = 'Host: ' + host_name + const.END_LINE\n user_agent = ( 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) ' +\n 'AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11' +\n const.END_LINE )\n #accept = 'Accept: */*' + const.END_LINE\n accept = 'Accept: text/html,application/xhtml+xml,application/xml' + const.END_LINE\n accept_language = 'Accept-Language: en-us,en;q=0.5' + const.END_LINE\n accept_encoding = 'Accept-Encoding: gzip,deflate' + const.END_LINE\n accept_charset = 'Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7' + const.END_LINE\n #keep_alive = 'Keep-Alive: 115' + const.END_LINE\n keep_alive = ''\n connection = 'Connection: keep-alive' + const.END_LINE\n referer = 'Referer: ' + const.END_LINE\n cookie = ( 'Cookie: ' + const.COOKIE_NAME + '=' + ID + '; ' +\n const.TUNNEL_COOKIE_NAME + '=' + tunnel_type + const.END_LINE )\n\n req = ( get +\n host +\n user_agent +\n accept +\n accept_language +\n accept_encoding +\n accept_charset +\n keep_alive +\n connection +\n referer +\n cookie +\n const.END_LINE )\n\n return req", "def create_marconi_headers():\n auth_token = get_auth_token()\n\n headers = ('{\"Host\": \"$host\",\"User-Agent\": \"$user_agent\",\"Date\":\"DATE\",'\n '\"Accept\": \"application/json\",\"Accept-Encoding\": \"gzip\",'\n '\"X-Project-ID\": \"$project_id\",'\n '\"X-Auth-Token\": \"$token\",\"Client-ID\": \"$uuid\"}')\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, user_agent=CFG.user_agent,\n project_id=CFG.project_id,\n token=auth_token, uuid=CFG.uuid)", "def build_http_header(k: bytes, v: bytes) -> bytes:\n return k + COLON + WHITESPACE + v", "def build_http_header(k: bytes, v: bytes) -> bytes:\n return k + COLON + WHITESPACE + v", "def get_request_header(conn_socket: socket.socket) -> str:\n raw_double_new_line = \"\\r\\n\\r\\n\".encode(HttpServer.FORMAT)\n raw_request_header = conn_socket.recv(HttpServer.HEADER)\n\n while raw_double_new_line 
not in raw_request_header:\n raw_request_header += conn_socket.recv(HttpServer.HEADER)\n\n return raw_request_header.decode(HttpServer.FORMAT)", "def make_request(sock, server_name):\n p = HttpParser()\n\n request = ('GET / HTTP/1.0\\r\\n' +\n 'User-Agent: pySSLScan\\r\\n' +\n 'Host: %s\\r\\n\\r\\n' % (server_name,))\n sock.write(request.encode('ascii'))\n\n headers = None\n while True:\n data = sock.recv(1024)\n if not data:\n break\n\n recved = len(data)\n nparsed = p.execute(data, recved)\n assert nparsed == recved\n\n if p.is_headers_complete():\n headers = p.get_headers()\n break\n\n return headers", "def _encode_message_header(cls, client_id, correlation_id, request_key,\n version=0):\n return struct.pack('>hhih%ds' % len(client_id),\n request_key, # ApiKey\n version, # ApiVersion\n correlation_id, # CorrelationId\n len(client_id), # ClientId size\n client_id) # ClientId", "def _build_http_header(self) -> Dict[str, str]:\n return {}", "def makePacketHeader(bytes):\n id = struct.unpack('!H', bytes[0:2])[0]\n length = struct.unpack('!H', bytes[2:4])[0]\n packet_count = struct.unpack('!I',bytes[4:8])[0]\n return PacketHeader(id, length, packet_count)", "def _create_headers(self, path, parameters=None, encoding=\"ascii\"):\n if parameters is None:\n parameters = dict()\n payload = {\n 'request': path,\n 'nonce': self._nonce()\n }\n payload.update(parameters)\n creds = self._api_credentials\n b64, signature = self._encode_and_sign(payload, encoding)\n headers = {\n # I think these two headers are set by default.\n #'Content-Type': 'text/plain',\n #'Content-Length': 0,\n 'X-GEMINI-PAYLOAD': b64.decode(encoding),\n 'X-GEMINI-APIKEY': creds.api_key,\n 'X-GEMINI-SIGNATURE': signature\n }\n return headers", "def test_make_request_headers(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.HEADERS_KEY: json.dumps(SAMPLE_REQUEST_HEADERS)})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL, \r\n headers=SAMPLE_REQUEST_HEADERS)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def process_header_request(self, request, http_s_obj):\n response_dict = {}\n data = request.split(\"\\r\\n\\r\\n\")\n header_info = data[0].split(\"\\r\\n\")\n headers = self.updateheader(header_info, http_s_obj)\n response_dict.update({'type': header_info[0].split()[0]})\n response_dict.update({'headers': headers})\n body = data[1]\n response_dict.update({'data': body})\n path = header_info[0].split()[1]\n if path.find('?') != -1:\n split_sym = '?'\n if path.find('&') != -1:\n split_sym = '&'\n try:\n req = path.split(split_sym)\n path = req[0]\n query = req[1]\n except Exception as e:\n query = ''\n response_dict.update({'path': path})\n response_dict.update({'query': query})\n\n return response_dict", "def __init__(self, header=Header(Type.OFPT_REQUESTFORWARD), request=None):\n super().__init__()\n self.header = header\n self.request = request", "def test_user_headers_sent_with_request(self):\n user_header = {'All-Request-Headers': 'Headers from user code'}\n req = self.httpbin.get_my_headers(headers=user_header, dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], user_header['All-Request-Headers'])" ]
[ "0.6875337", "0.6805739", "0.66936874", "0.66936874", "0.64733094", "0.64733094", "0.6256127", "0.623692", "0.6143666", "0.61382276", "0.6119428", "0.6079149", "0.6015242", "0.5993089", "0.5989563", "0.5983968", "0.5971236", "0.59581345", "0.59575814", "0.59575814", "0.5942634", "0.5913379", "0.5891781", "0.58886355", "0.5838973", "0.58095396", "0.5781632", "0.57695484", "0.57674664", "0.57666177" ]
0.69332606
0
Parse an HTTP response packet: status line, body
def parse_response(conn): # HEAD head = read2CRLF(conn) if debug == 0: print("STATUS LINE: ", head ) # Tokens in the status line are separated by spaces # status_line = http-version <SP> status_code <SP> Reason status_line = head.decode().split(' ') # Parse the status line if len(status_line) < 3: raise Exception("Malformed Response Packet") if status_line[0] != HTTPConstants.VERSION: raise Exception("Malformed Response Packet") # CRLF separation between HEAD and BODY expectCRLF(conn) # BODY data = read2CRLF(conn) response = [ status_line[1], status_line[2] ] # return the data in the body return response, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_status_line(line):\n # Up to the first space is the protocol version.\n index0 = line.index(SPACE)\n http_version = line[: index0]\n # Make sure it's the protocol version we recognize.\n assert http_version == HTTP_VERSION\n # Starting from the first space, up to the next space is the status code.\n index1 = line.index(SPACE, index0 + 1)\n status = line[index0 + 1 : index1]\n # Convert the status code to an integer.\n status = int(status)\n # The remainder is the reason.\n reason = line[index1 + 1 :]\n return status, reason", "def parse_response(self, buffer):\n # Begin by copying the data out of the buffer. This is necessary\n # because as much as possible we want to use the built-in bytestring\n # methods, rather than looping over the data in Python.\n temp_buffer = buffer.tobytes()\n\n index = temp_buffer.find(b'\\n')\n if index == -1:\n return None\n\n version, status, reason = (\n temp_buffer[0:index].split(None, 2) + [b''])[:3]\n if not version.startswith(b'HTTP/1.'):\n raise ParseError(\"Not HTTP/1.X!\")\n\n minor_version = int(version[7:])\n status = int(status)\n reason = memoryview(reason.strip())\n\n # Chomp the newline.\n index += 1\n\n # Now, parse the headers out.\n end_index = index\n headers = []\n\n while True:\n end_index = temp_buffer.find(b'\\n', index)\n if end_index == -1:\n return None\n elif (end_index - index) <= 1:\n # Chomp the newline\n end_index += 1\n break\n\n name, value = temp_buffer[index:end_index].split(b':', 1)\n value = value.strip()\n headers.append((memoryview(name), memoryview(value)))\n index = end_index + 1\n\n resp = Response(status, reason, minor_version, headers, end_index)\n return resp", "def __read_response(self, nblines=-1):\n resp, code, data = (b\"\", None, None)\n cpt = 0\n while True:\n try:\n line = self.__read_line()\n except Response as inst:\n code = inst.code\n data = inst.data\n break\n except Literal as inst:\n resp += self.__read_block(inst.value)\n if not resp.endswith(CRLF):\n resp += self.__read_line() + CRLF\n continue\n if not len(line):\n continue\n resp += line + CRLF\n cpt += 1\n if nblines != -1 and cpt == nblines:\n break\n\n return (code, data, resp)", "def parse_response(msg):\n start_line, header, body = _parse_message(msg)\n status, reason = _parse_status_line(start_line)\n return Response(status, reason, header, body)", "def extract_http_resp( buf, tunnel_type ):\n match = re.match('HTTP/1.[01] ([0-9]{3}) ', buf)\n if not match:\n # print 'warning: BUF DOES NOT MATCH expected header [%s]' % str(buf)\n return (buf, None, '', '', -3)\n else:\n status = int(match.group(1))\n\n pieces = buf.split('\\r\\n\\r\\n', 1)\n if len(pieces) != 2:\n #print 'ERROR: bogus http response (%d pieces)' % len(pieces)\n return (buf, None, '', '', -4)\n\n header = pieces[0]\n\n # 301: moved permanently\n # 302: moved temporarily/\"found\"\n #\n if (status == 301) or (status == 302): # Look elsewhere\n match = re.search('^[Ll]ocation:\\s*([^\\r]*)\\s$',\n header, re.MULTILINE)\n if match:\n location = match.group(1).strip()\n # print \"REDIRECT LOCATION = [%s]\" % location\n\n # search the header for either a Content-Length or Transfer-Encoding\n # tag, and use it to figure out how long the body is.\n #\n # An HTTP/1.1 header should have one or the other.\n if re.search('^[Cc]onnection:\\s*[Cc]lose\\s*$',\n header, re.MULTILINE):\n status = '-2'\n\n match = re.search('^[Cc]ontent-[Ll]ength:\\s*([0-9]+)\\s*$',\n buf, re.MULTILINE)\n if match:\n unzip_body = ''\n\n try:\n len_header = buf.index( const.END_HEADER ) + len( 
const.END_HEADER )\n except ValueError:\n return ( buf, None, '', '', -1)\n\n content_len = int( match.group(1) )\n (new_buf, response, body) = process_content_len(buf, content_len)\n\n if response == None:\n return (new_buf, None, '', '', -1)\n\n # We only unzip for http uni tunnel. The http bi tunnel\n # directly modifies the zipped payload\n #\n if re.search('^[Cc]ontent-[Ee]ncoding:\\s*[Gg]zip\\s*$',\n header, re.MULTILINE) and tunnel_type is const.HTTP_UNI_TUNNEL:\n try:\n strobj = body\n fileobj = cStringIO.StringIO(strobj)\n unzip_body = gzip.GzipFile(\"dummy-name\", 'rb', 9, fileobj).read()\n\n except zlib.error:\n print 'Error: response body is not gzip'\n\n return (new_buf, response, body, unzip_body, status)\n\n\n elif re.search('^[Tt]ransfer-[Ee]ncoding:\\s*chunked\\s*$',\n header, re.MULTILINE):\n\n finished = False\n response = None\n body = ''\n unzip_body = ''\n\n try:\n len_header = buf.index( const.END_HEADER ) + len( const.END_HEADER )\n except ValueError:\n return ( buf, response, body, unzip_body, status )\n\n new_buf = buf[len_header:]\n msg_len = len_header\n\n while True:\n try:\n new_line = new_buf.index( const.END_LINE )\n\n clen = 0\n for i in range(0,new_line):\n digit = new_buf[i]\n if re.match('[0-9a-fA-F]', digit):\n clen *= 16\n clen += int(digit, 16)\n\n if clen == 0:\n finished = True\n msg = buf[ : msg_len]\n body = buf[len_header : msg_len]\n\n # peel off the last 0\\r\\n\\r\\n.\n new_buf = new_buf[5:] # TODO: this is a big assumption\n break\n else:\n chunk_offset = (\n new_line + len(const.END_LINE) + clen + len(const.END_LINE) )\n\n start_chunk = new_line + len(const.END_LINE)\n end_chunk = start_chunk + clen #+ len(const.END_LINE)\n chunk = new_buf[start_chunk:end_chunk]\n\n unzip_body = unzip_body + chunk\n new_buf = new_buf[ chunk_offset : ]\n msg_len += chunk_offset\n\n except ValueError:\n return ( buf, None, None, None, -1 )\n\n if finished == True:\n\n\n # We only unzip for http uni tunnel. 
The http bi tunnel\n # directly modifies the zipped payload\n #\n if re.search('^[Cc]ontent-[Ee]ncoding:\\s*[Gg]zip\\s*$',\n header, re.MULTILINE) and tunnel_type is const.HTTP_UNI_TUNNEL:\n try:\n strobj = unzip_body\n fileobj = cStringIO.StringIO(strobj)\n unzip_body = gzip.GzipFile(\"dummy-name\", 'rb', 9, fileobj).read()\n except zlib.error:\n print 'Error: response body is not gzip'\n\n return (new_buf, msg, body, unzip_body, status)\n else:\n return(buf, None, None, None, -1 )", "def _response(status_line):\n return b\"HTTP/1.1 \" + status_line + b\"\\nContent-length: 0\\n\\n\"", "def __parse_response(self, response_text):\n root = etree.fromstring(response_text)\n namespace = re.search('{(.*)}', root.tag).group(1)\n status_path = '{%s}Status' % namespace\n status = int(root.findtext(status_path))\n response = None\n if status != 0:\n response = Error(status, root, namespace)\n else:\n response = self._parse_response_body(root, namespace)\n return response", "def process(self, raw: bytes) -> Tuple[bool, bytes]:\n line, raw = find_http_line(raw)\n if line is None:\n return False, raw\n\n if self.state == httpParserStates.INITIALIZED:\n self.process_line(line)\n self.state = httpParserStates.LINE_RCVD\n elif self.state in (httpParserStates.LINE_RCVD, httpParserStates.RCVING_HEADERS):\n if self.state == httpParserStates.LINE_RCVD:\n # LINE_RCVD state is equivalent to RCVING_HEADERS\n self.state = httpParserStates.RCVING_HEADERS\n if line.strip() == b'': # Blank line received.\n self.state = httpParserStates.HEADERS_COMPLETE\n else:\n self.process_header(line)\n\n # When connect request is received without a following host header\n # See\n # `TestHttpParser.test_connect_request_without_host_header_request_parse`\n # for details\n if self.state == httpParserStates.LINE_RCVD and \\\n self.type == httpParserTypes.RESPONSE_PARSER and \\\n raw == CRLF:\n self.state = httpParserStates.COMPLETE\n # When raw request has ended with \\r\\n\\r\\n and no more http headers are expected\n # See `TestHttpParser.test_request_parse_without_content_length` and\n # `TestHttpParser.test_response_parse_without_content_length` for details\n elif self.state == httpParserStates.HEADERS_COMPLETE and \\\n self.type == httpParserTypes.REQUEST_PARSER and \\\n self.method != httpMethods.POST and \\\n self.bytes.endswith(CRLF * 2):\n self.state = httpParserStates.COMPLETE\n elif self.state == httpParserStates.HEADERS_COMPLETE and \\\n self.type == httpParserTypes.REQUEST_PARSER and \\\n self.method == httpMethods.POST and \\\n (b'content-length' not in self.headers or\n (b'content-length' in self.headers and\n int(self.headers[b'content-length'][1]) == 0)) and \\\n self.bytes.endswith(CRLF * 2):\n self.state = httpParserStates.COMPLETE\n\n return len(raw) > 0, raw", "def parse_response(self):\n pass", "def process_raw_response(self):\n non_excepts = self.non_exceptionals\n raw = self.raw_response\n\n #if the raw respones is an urllib2 error act accordingly.\n if isinstance(raw, non_excepts):\n self.error = raw\n if isinstance(raw, HTTPError):\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n else:\n #its a url error nothing to do\n pass\n\n else:\n #only urllib.addinfourl type should be now be possible\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n self.body = \"\".join(raw.readlines())", "def parse_response(self, resp):\n p, u = self.getparser()\n\n if hasattr(resp, 'text'):\n # modern requests will do this for us\n text = resp.text # this is unicode(py2)/str(py3)\n else:\n\n 
encoding = requests.utils.get_encoding_from_headers(resp.headers)\n if encoding is None:\n encoding = 'utf-8' # FIXME: what to do here?\n\n if sys.version_info[0] == 2:\n text = unicode( # noqa: F821\n resp.content, encoding, errors='replace')\n else:\n assert sys.version_info[0] == 3\n text = str(resp.content, encoding, errors='replace')\n p.feed(text)\n p.close()\n return u.close()", "def parse_response(self, raw_response):\n \n parsed_response = {\n 'success': False,\n 'raw_response': raw_response,\n }\n \n # Try to make sense of the response status\n try:\n status, msg = raw_response.split('\\r\\n')\n parsed_response['success'] = status == 'OK'\n parsed_response['message'] = msg\n except:\n msg = None\n \n # Try to parse the message ID\n try:\n key, val = msg.split('=')\n parsed_response[key] = val\n except:\n pass\n \n return parsed_response", "def parse_response(response):\n return json.loads(response.read()[MAGIC_PREFIX_OFFSET:])", "def parse(self, response):", "def parse_response(self, response, case):\n request = response.request\n parsed = {\n 'request': {\n 'method': request.method,\n 'url': request.url,\n 'body': request.body,\n },\n 'response': {\n 'headers': OrderedDict(),\n 'status_code': response.status_code,\n 'reason': response.reason,\n }\n }\n\n # Re-assemble request line\n url_parts = urlparse(request.url)\n parsed['request']['request_line'] = '%s %s%s%s HTTP/1.1' % (\n request.method, url_parts.path, '?' if url_parts.query else '',\n url_parts.query)\n\n # Process request headers\n if self.mode == 'display':\n hostname = url_parts.hostname\n else:\n hostname = self.doc_hostname\n parsed['request']['headers'] = OrderedDict((('Host', hostname),))\n for header in sorted([h.title() for h in request.headers]):\n raw_value = request.headers[header]\n value = self.parse_header(header, raw_value, 'request')\n if value:\n parsed['request']['headers'][header.title()] = value\n\n # Re-assemble response line\n parsed['response']['response_line'] = 'HTTP/1.1 %s %s' % (\n response.status_code, response.reason)\n\n # Process response headers\n for header in sorted([h.title() for h in response.headers]):\n raw_value = response.headers[header]\n value = self.parse_header(header, raw_value, 'response')\n if value:\n fixed_header = header.title().replace('Www', 'WWW')\n parsed['response']['headers'][fixed_header] = value\n\n # Process response body\n response.encoding = 'utf-8'\n body = response.text\n if self.standardize:\n body = body.replace(api, self.doc_base_url)\n for key, value in case.get('standardize', {}).items():\n assert key in ('created', 'modified', 'date')\n pattern = r\"\"\"(?x)(?s) # Be verbose, . 
include newlines\n \"%s\":\\s\" # Key and quote\n \\d{4}-\\d{2}-\\d{2} # Date\n T\\d{2}:\\d{2}:\\d{2} # Time\n \\.\\d{0,6}Z # Microseconds and UTC timezone\n \", # End quote and comma\n \"\"\" % key\n replace = '\"%s\": \"%s\",' % (key, value)\n body = re.sub(pattern, replace, body)\n parsed['response']['body'] = body\n\n return parsed", "def _process_pool_status_response(self, buf, length):\n\t\tself.pcpResInfo.pcp_add_json_result('command_status', 'success')\n\t\tvalue, index = self._getNextString(buf, 0)\n\t\tif value == 'ArraySize':\n\t\t\tindex += 1\n\t\t\tci_size = buf[index:]\n\t\t\tci_size = self.bytes_to_int(ci_size)\n\n\t\t\tself._setResultStatus(ResultStateType.INCOMPLETE)\n\t\t\tself.pcpResInfo.pcp_add_json_result('config', list())\n\t\telif value == 'ProcessConfig':\n\t\t\tindex += 1\n\t\t\tif self.PCPResultStatus(self.pcpResInfo) != ResultStateType.INCOMPLETE:\n\t\t\t\tself.pcp_internal_error('command failed. invalid response')\n\t\t\t\tself.pcpResInfo.pcp_add_json_result('command_status', 'failed')\n\t\t\t\tself._setResultStatus(ResultStateType.BAD_RESPONSE)\n\n\t\t\tstatus = POOL_REPORT_CONFIG()\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.name = value\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.value = value\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.desc = value\n\n\t\t\tself.pcpResInfo.pcp_append_json_result('config', status.get_json())\n\t\t\tself._setResultData(self.pcpResInfo, status)\n\t\telif value == 'CommandComplete':\n\t\t\tself._setResultStatus(ResultStateType.COMMAND_OK)", "def parse_header(self):", "def _unpack_body(self, buff):\n\n # Unpack <return_code> and <count> (how many records affected or selected)\n self._return_code = struct_L.unpack_from(buff, offset=0)[0]\n\n # Separate return_code and completion_code\n self._completion_status = self._return_code & 0x00ff\n self._return_code >>= 8\n\n # In case of an error unpack the body as an error message\n if self._return_code != 0:\n self._return_message = unicode(buff[4:-1], self.charset, self.errors)\n if self._completion_status == 2:\n raise TarantoolError(self._return_code, self._return_message)\n\n # Unpack <count> (how many records affected or selected)\n self._rowcount = struct_L.unpack_from(buff, offset=4)[0]\n\n # If the response doesn't contain any tuple - there is nothing to unpack\n if self._body_length == 8:\n return\n\n # Parse response tuples (<fq_tuple>)\n if self._rowcount > 0:\n offset = 8 # The first 4 bytes in the response body is the <count> we have already read\n while offset < self._body_length:\n # In resonse tuples have the form <size><tuple> (<fq_tuple> ::= <size><tuple>).\n # Attribute <size> takes into account only size of tuple's <field> payload,\n # but does not include 4-byte of <cardinality> field.\n #Therefore the actual size of the <tuple> is greater to 4 bytes.\n tuple_size = struct.unpack_from(\"<L\", buff, offset)[0] + 4\n tuple_data = struct.unpack_from(\"<%ds\" % (tuple_size), buff, offset+4)[0]\n tuple_value = self._unpack_tuple(tuple_data)\n if self.field_types:\n self.append(self._cast_tuple(tuple_value))\n else:\n self.append(tuple_value)\n\n offset = offset + tuple_size + 4 # This '4' is a size of <size> attribute", "def _read_response_header(self):\r\n length = None\r\n encoding = \"identity\"\r\n chunked = False\r\n\r\n hdr = []\r\n while True:\r\n line = 
self._read_line()\r\n if not line:\r\n break\r\n hdr.append(line)\r\n\r\n for line in hdr:\r\n if \"Content-Length\" in line:\r\n length = int(line[15:])\r\n if \"Content-Encoding\" in line:\r\n encoding = line[17:].strip()\r\n if \"Transfer-Encoding: chunked\" in line:\r\n chunked = True\r\n\r\n return (length, encoding, chunked)", "def parse_response(response):\n # a result should always have a status\n status = response['status']\n\n # a result _may_ have a results or a reason\n result = response.get('results', [])\n reason = response.get('reason', None)\n\n return status, result, reason", "def parse_response(page_content):\n\n try:\n parsed = html_parser.get_line_status(str(page_content))\n\n except Exception:\n raise ParseError()\n\n if not isinstance(parsed, dict):\n logging.error(\"Parse failed\")\n raise ParseError()\n\n result = {_LINE_NAMES_MAP[parsed_line]: (\n _match_status_keyword(parsed_status), _match_reason_keyword(parsed_status))\n for parsed_line, parsed_status in parsed.items()}\n return result", "def parse_status_packet(msg):\n data = msg.data.decode()\n src = msg.remote_device.get_64bit_addr()\n\n cmd = data.split(\"|\")\n\n packet_type = int(cmd[0])\n if packet_type == 1:\n unix_time = int(cmd[1])\n xacc = int(cmd[2])\n yacc = int(cmd[3])\n zacc = int(cmd[4])\n batt = float(cmd[5])\n intc = int(cmd[6])\n\n causes = { 0: \"Unknown\",\n 1: \"Waspmote was moved\",\n 2: \"Timeout\" }\n\n print(f\"\\n\\n *** Waspmote {src} status *** \")\n print(\"\\t\", datetime.fromtimestamp(unix_time).strftime(\"%a, %Y/%m/%d, %H:%M:%S\"))\n print(f\"\\t(x,y,z) = ({xacc}, {yacc}, {zacc})\")\n print(f\"\\tBattery level = {batt} %\")\n print(f\"\\tInterruption cause : {causes[intc]}\\n\\n\")\n return True\n\n return False", "def parse(lines): \n replied = len(lines)\n avg_delay, lost = 0, 0\n qos = 1.0\n \n if replied != 0:\n for line in lines:\n line.strip() #remove leading and trailing spaces\n \"\"\"\n Each line has the following fields:\n [status code] [reply time (seconds since epoch)] [source IP] [source url] [source query] [serving delay]\n \n e.g.:\n 200 1296756182 192.168.10.2 /home.php ?N=192 11045\n 200 1296756183 192.168.10.2 /home.php ?N=192 230036\n 200 1296756183 192.168.10.2 /home.php ?N=192 230684\n \"\"\"\n status, time, sourceIP, url, query, delay = line.split()\n \n time = int(time)\n delay = int(delay)\n \n if delay > DEADLINE:\n lost += 1\n avg_delay += delay\n avg_delay /= replied\n qos = (replied - lost) / replied\n\n return {'replied': replied, 'delay' : avg_delay, 'qos' : qos, 'lost': lost}", "def status_ok_response(response_body):\n return '{header_top}{line_break}{header_fields}{line_break}{body}'.format(\n header_top=get_header_start(STATUS_OK),\n line_break=l_b,\n header_fields=get_header_fields({\n 'Content-Type': 'text/html; charset=utf-8',\n 'Connection': 'keep-alive'\n }),\n body=response_body\n )", "def _parse_boxee_response( self, response ):\n from xml.dom import minidom\n\n self._status(\"Parsing response from Boxee:\\n\" + response)\n\n dom = minidom.parseString(response)\n\n for node in dom.getElementsByTagName('BDP1'):\n self.BOXEE_PORT = node.getAttribute('httpPort')", "def _parse_udp_packet(self, packet_bytes):\n opcode = packet_bytes[:2]\n if opcode == 5:\n reply = self.error_messages[int.from_bytes(packet_bytes[2:4], 'big')]\n print(reply)\n elif opcode == 4:\n reply = \"ACK\"\n else:\n reply = \"UNK\"\n return reply", "def view_response_bytes(line):\n reqs = yield load_reqlist(line)\n for req in reqs:\n if req.response:\n if len(reqs) > 1:\n 
print '-'*15 + (' %s ' % req.reqid) + '-'*15\n print req.response.full_message\n else:\n print \"Request %s does not have a response\" % req.reqid", "def _read_headers(self, data):\n do_close = False\n\n try:\n initial_line, data = data.split(CRLF, 1)\n try:\n try:\n http_version, status, status_text = initial_line.split(' ', 2)\n status = int(status)\n except ValueError:\n http_version, status = initial_line.split(' ')\n status = int(status)\n status_text = HTTP.get(status, '')\n except ValueError:\n raise BadRequest('Invalid HTTP status line %r.' % initial_line)\n\n # Parse the headers.\n headers = read_headers(data)\n\n # Construct an HTTPResponse object.\n self.current_response = response = HTTPResponse(self,\n self._requests[0], http_version, status, status_text, headers)\n\n # Do we have a Content-Encoding header?\n if 'Content-Encoding' in headers:\n encoding = headers['Content-Encoding']\n if encoding == 'gzip':\n response._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)\n elif encoding == 'deflate':\n response._decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n\n # Do we have a Content-Length header?\n if 'Content-Length' in headers:\n self._stream.on_read = self._read_body\n self._stream.read_delimiter = int(headers['Content-Length'])\n\n elif 'Transfer-Encoding' in headers:\n if headers['Transfer-Encoding'] == 'chunked':\n self._stream.on_read = self._read_chunk_head\n self._stream.read_delimiter = CRLF\n else:\n raise BadRequest(\"Unsupported Transfer-Encoding: %s\" % headers['Transfer-Encoding'])\n\n # Is this a HEAD request? If so, then handle the request NOW.\n if response.method == 'HEAD':\n self._on_response()\n\n except BadRequest, e:\n log.info('Bad response from %r: %s',\n self._server, e)\n do_close = True\n\n except Exception:\n log.exception('Error handling HTTP response.')\n do_close = True\n\n # Clear the way for the next request.\n if do_close:\n self._requests.pop(0)\n self.current_response = None\n if self._stream:\n self._stream.close()\n self._stream = None", "def _parse_packet(packet: StreamMessageResponse) -> Packet:\n if packet is None:\n raise TypeError(\"Packet cannot be None!\")\n\n packet = MessageToDict(packet)\n\n # Decoding Header\n ingress_port_base64 = packet['packet']['metadata'][0]['value'].encode()\n ingress_port = base64.decodebytes(ingress_port_base64) # retrieving ingress_port; not used, yet\n\n # Decoding Payload\n packet = _scapy_parse(packet)\n\n return packet", "def process_content_len(buf, content_len):\n try:\n end_resp = buf.index( const.END_HEADER ) + len( const.END_HEADER )\n resp_len = end_resp + content_len\n\n if len( buf ) < resp_len:\n return ( buf, None, None )\n\n except ValueError:\n print \"Buf doesn't contain full response yet\"\n return ( buf, None, None )\n\n\n # Pull out response\n #\n resp = buf[ : end_resp + content_len ]\n new_buf = buf[ end_resp + content_len : ]\n payload = buf[ end_resp : end_resp + content_len ]\n if ( new_buf == None ):\n new_buf = ''\n\n return (new_buf, resp, payload)" ]
[ "0.7128819", "0.6732791", "0.66464615", "0.659241", "0.6506562", "0.6455829", "0.63313085", "0.62830883", "0.6256949", "0.6198643", "0.6171631", "0.61465275", "0.60450613", "0.598626", "0.5964472", "0.5955491", "0.5901113", "0.58969516", "0.5878095", "0.58769494", "0.5867655", "0.58628374", "0.5846196", "0.58293605", "0.5814342", "0.58105755", "0.58014685", "0.57882094", "0.5730137", "0.5723893" ]
0.7798692
0
Accepts a COCO object detection file. Returns a list of images and categories.
def process_coco(coco_file_path: str) -> (list, dict): coco_dict = load_json(coco_file_path) # rearrange coco file for better annotation reach images = list() for image in coco_dict["images"]: image_annotations = list() for annotation in coco_dict["annotations"]: if image["id"] == annotation["image_id"]: image_annotations.append(annotation) image["annotations"] = image_annotations images.append(image) return images, coco_dict["categories"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_labelme_to_coco(path_to_data):\r\n # convert labelme annotations to coco\r\n labelme2coco.convert(path_to_data, path_to_data + r'\\coco_annotation.json')\r\n\r\n # Open the coco format data\r\n with open(path_to_data + r'\\coco_annotation.json') as f:\r\n coco_d = json.load(f)\r\n\r\n # Get the category IDs for each category and create a new \"categories\" section.\r\n categories = []\r\n # for category in coco_d['categories']:\r\n # if category['name'] == 'Bad':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": category['id'],\r\n # \"supercategory\": category['id'],\r\n # \"isthing\": 1,\r\n # \"color\": [222, 23, 1]\r\n # })\r\n # elif category['name'] == 'Good':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": \"Good\",\r\n # \"supercategory\": \"Good\",\r\n # \"isthing\": 1,\r\n # \"color\": [133, 23, 1]\r\n # })\r\n\r\n # Update the \"catogories\" section of the coco format data with the correct category IDs.\r\n # coco_d['categories'] = categories\r\n\r\n categories = []\r\n for cat in coco_d['categories']:\r\n cat['isthing'] = 1\r\n categories.append(cat['name'])\r\n\r\n # Fix the segmentation and bbox.\r\n for annot in coco_d['annotations']:\r\n annot['bbox_mode'] = 0\r\n seg = annot['segmentation'][0]\r\n annot['bbox'] = seg\r\n annot['segmentation'] = [[seg[0], seg[1], seg[0], seg[3], seg[2], seg[3], seg[2], seg[1]]]\r\n\r\n # Save the modified coco format data.\r\n with open(path_to_data + r'\\coco_annotation.json', 'w') as j:\r\n json.dump(coco_d, j, sort_keys=True, indent=4)\r\n\r\n # Show the images to the user to validate the annotations.\r\n # Register the image information.\r\n register_coco_instances(\"coco_visualise\", {}, path_to_data + r\"/coco_annotation.json\",\r\n path_to_data)\r\n MetadataCatalog.get(\"meta_visualise\").set(thing_classes=categories)\r\n # MetadataCatalog.get(\"meta_train\").set(thing_classes=[\"Bad\", \"Good\"], thing_colors=[(172, 0, 0), (229, 0, 0)])\r\n train_metadata = MetadataCatalog.get(\"meta_visualise\")\r\n coco_train_dataset = DatasetCatalog.get(\"coco_visualise\")\r\n\r\n st.write('Showing the randomly picked 5 images. Check if the annotation is correctly embedded.')\r\n # Randomly pick 5 images to show to the user to validate the annotations.\r\n for d in random.sample(coco_train_dataset, 5):\r\n im = Image.open(d['file_name'])\r\n im_array = np.asarray(im)\r\n v = Visualizer(im_array, metadata=train_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.5)\r\n v = v.draw_dataset_dict(d)\r\n pil_image = Image.fromarray(v.get_image())\r\n st.image(pil_image)\r\n # window = tk.Toplevel()\r\n # window.tkimage = ImageTk.PhotoImage(pil_image)\r\n # window.attributes('-topmost', True)\r\n # label = tk.Label(window, image=window.tkimage)\r\n # label.pack()\r\n # button_close = tk.Button(window, text=\"Close\", command=window.destroy)\r\n # button_close.pack(fill='x')\r\n\r\n # Confirm the annotations with user. 
If the annotations are correct, it will proceed further.\r\n # If not, it terminates the program.\r\n # if messagebox.askyesno(title=\"Validate Annotations\", message=\"Were all annotations correct?\"):\r\n # pass\r\n DatasetCatalog.clear()\r\n MetadataCatalog.clear()", "def _load_colabeled_img(self) -> np.ndarray:\n return tifffile.imread(str(self.colabel_img))", "def load_cifar_images(filename):\n\n from load_cifar import load_file\n from load_cifar import label_dict\n\n data,labels = load_file(filename)\n\n # two classes to keep\n class0 = label_dict['airplane']\n class1 = label_dict['bird']\n # remove all but two classes\n keep = np.logical_or(labels==class0,labels==class1)\n data = data[keep,...]\n labels = labels[keep]\n # set labels to 0 or 1\n labels[labels==class0]=0\n labels[labels==class1]=1\n\n # rgb -> grayscale\n gray_data = rgb2gray(data)\n return data,gray_data,labels", "def object_detect(filename):\n cv2.ocl.setUseOpenCL(False)\n just_fname = filename.split(\".\")[0]\n image = cv2.imread('./static/uploads/' + filename)\n bbox, label, conf = cv.detect_common_objects(image)\n output_image = draw_bbox(image, bbox, label, conf)\n plt.imshow(output_image)\n plt.savefig(os.path.join('./static/output/', just_fname + '.png'))\n d = Counter(label)\n if not label:\n return \"No objects detected\"\n labelstr = \", \".join('{} {}'.format(v, k) for k, v in d.items())\n return labelstr", "def create_coco_label(is_training):\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict", "def get_imgs_from_json(self):\n # instantiate COCO specifying the annotations json path\n # Specify a list of category names of interest\n catIds = self.coco.getCatIds(catNms=[self.categ])\n print(\"catIds: \", catIds)\n # Get the corresponding image ids and images using loadImgs\n imgIds = self.coco.getImgIds(catIds=catIds)\n images = self.coco.loadImgs(imgIds)\n print(f\"{len(images)} images in '{self.json_path}' with '{self.categ}' instances\")\n self.catIds = catIds # list\n return images", "def coco_to_scalabel(coco: GtType) -> Tuple[List[Frame], Config]:\n vid_id2name: 
Optional[Dict[int, str]] = None\n if \"videos\" in coco:\n vid_id2name = {\n video[\"id\"]: video[\"name\"]\n for video in coco[\"videos\"] # type: ignore\n }\n img_id2img: Dict[int, ImgType] = {img[\"id\"]: img for img in coco[\"images\"]}\n\n cats = [\n None for _ in range(len(coco[\"categories\"]))\n ] # type: List[Optional[Category]]\n cat_id2name = {}\n for category in coco[\"categories\"]:\n assert 0 < int(category[\"id\"]) <= len(coco[\"categories\"])\n cat_id2name[category[\"id\"]] = category[\"name\"]\n cats[int(category[\"id\"]) - 1] = Category(name=category[\"name\"])\n assert None not in cats\n config = Config(categories=cats)\n\n img_id2anns: Dict[int, Iterable[AnnType]] = {\n img_id: list(anns)\n for img_id, anns in groupby(\n coco[\"annotations\"], lambda ann: ann[\"image_id\"]\n )\n }\n\n scalabel: List[Frame] = []\n img_ids = sorted(img_id2img.keys())\n for img_id in tqdm(img_ids):\n img = img_id2img[img_id]\n size = ImageSize(width=img[\"width\"], height=img[\"height\"])\n\n if \"file_name\" in img:\n url: Optional[str] = img[\"file_name\"]\n else:\n url = None\n\n if (\n vid_id2name is not None\n and \"video_id\" in img\n and img[\"video_id\"] is not None\n ):\n video_name: Optional[str] = vid_id2name[img[\"video_id\"]]\n else:\n video_name = None\n if \"frame_id\" in img:\n frame_index = img[\"frame_id\"]\n else:\n frame_index = None\n\n labels: Optional[List[Label]] = None\n if img_id in img_id2anns:\n labels = []\n anns = sorted(img_id2anns[img_id], key=lambda ann: ann[\"id\"])\n for i, ann in enumerate(anns):\n label = Label(\n id=ann.get(\n \"scalabel_id\", str(ann.get(\"instance_id\", ann[\"id\"]))\n ),\n index=i + 1,\n attributes=dict(\n crowd=bool(ann[\"iscrowd\"]), ignored=bool(ann[\"ignore\"])\n ),\n category=cat_id2name[ann[\"category_id\"]],\n )\n if \"score\" in ann:\n label.score = ann[\"score\"]\n if \"bbox\" in ann and ann[\"bbox\"] is not None:\n label.box2d = bbox_to_box2d(ann[\"bbox\"])\n if \"segmentation\" in ann:\n # Currently only support conversion from polygon.\n assert isinstance(ann[\"segmentation\"], list)\n label.poly2d = polygon_to_poly2ds(ann[\"segmentation\"])\n labels.append(label)\n\n scalabel.append(\n Frame(\n name=os.path.split(img[\"file_name\"])[-1],\n url=url,\n size=size,\n videoName=video_name,\n frameIndex=frame_index,\n labels=labels,\n )\n )\n\n return scalabel, config", "def coco_format(type_, id_list, annotation_url_list, file_list, result_list, label_list, coco_flag=0):\n annotations = []\n for i, result in enumerate(result_list):\n temp = {}\n annotation_url = annotation_url_list[i]\n file_path = file_list[i]\n temp['id'] = id_list[i]\n temp['annotation'] = []\n im = cv2.imread(file_path)\n height, width, _ = im.shape\n if result.shape[0] == 0:\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as w:\n w.write(temp['annotation'])\n continue\n else:\n for j in range(result.shape[0]):\n cls_id = int(result[j][0]) + 1 + coco_flag\n x1 = result[j][1]\n x2 = result[j][3]\n y1 = result[j][2]\n y2 = result[j][4]\n score = result[j][5]\n width = max(0, x2 - x1)\n height = max(0, y2 - y1)\n if cls_id in label_list:\n temp['annotation'].append({\n 'area': width * height,\n 'bbox': [x1, y1, width, height],\n 'category_id': cls_id,\n 'iscrowd': 0,\n 'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],\n 'score': score\n })\n if type_ == 2 and len(temp['annotation']) > 0:\n temp['annotation'] = [temp['annotation'][0]]\n temp['annotation'][0].pop('area')\n 
temp['annotation'][0].pop('bbox')\n temp['annotation'][0].pop('iscrowd')\n temp['annotation'][0].pop('segmentation')\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as wr:\n wr.write(temp['annotation'])\n return annotations", "def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]", "def __get_coco_masks(anns: list, img):\n width = img.shape[1]\n height = img.shape[0]\n\n if len(anns) == 0:\n raise AnnError('Empty annotation detected.')\n \n classes = []\n mask = np.zeros((height, width), dtype=np.int32)\n \n for inst_idx, ann in enumerate(anns):\n cat_id = ann['category_id']\n classes.append(cat_id)\n\n m = cocoseg_to_binary(ann['segmentation'], height, width) # zero one mask\n m = m.astype(np.int32) * (inst_idx + 1)\n mask[m > 0] = 255\n\n classes = np.asarray(classes)\n return mask, classes", "def get_data(input_path):\n all_imgs = []\n classes_count = {}\n class_mapping = {}\n\n # parsing Flag\n visualise = False\n\n # MSCOCO directory\n data_path = input_path\n\n print('Parsing annotation files')\n annot_path = os.path.join(data_path, 'annotations_bbox')\n imgs_path = os.path.join(data_path, 'images')\n\n # images directory (train, val, trainval, test)\n imgsets_path_trainval = os.path.join(data_path, 'images', 'trainval.txt')\n imgsets_path_train = os.path.join(data_path, 'images', 'train.txt')\n imgsets_path_val = os.path.join(data_path, 'images', 'val.txt')\n imgsets_path_test = os.path.join(data_path, 'images', 'test.txt')\n\n trainval_files = []\n train_files = []\n val_files = []\n test_files = []\n\n with open(imgsets_path_trainval) as f:\n for line in f:\n trainval_files.append(line.strip())\n\n with open(imgsets_path_train) as f:\n for line in f:\n train_files.append(line.strip())\n\n with open(imgsets_path_val) as f:\n for line in f:\n val_files.append(line.strip())\n\n # test-set (default) not included in MSCOCO\n if 
os.path.isfile(imgsets_path_test):\n with open(imgsets_path_test) as f:\n for line in f:\n test_files.append(line.strip())\n\n # annotation read\n annots_train = json.load(open(os.path.join(annot_path, 'bbox_train2017.json'), 'r'))\n annots_val = json.load(open(os.path.join(annot_path, 'bbox_val2017.json'), 'r'))\n annots = dict()\n annots['train'] = annots_train\n annots['val'] = annots_val\n\n for part in ['train', 'val']:\n annots_keys = tqdm(annots[part].keys())\n for img_name in annots_keys:\n annots_keys.set_description(\"Processing %s\" % img_name)\n for bbox in annots[part][img_name]:\n class_name = bbox['label'].replace(' ', '')\n all_imgs.append({\n \"filepath\": os.path.join(data_path, 'images', '%s2017' % part, \"%s.jpg\" % img_name),\n \"width\": None,\n \"height\": None,\n \"bboxes\": [{\n \"class\": class_name,\n \"x1\": bbox['bbox']['x1'],\n \"y1\": bbox['bbox']['x2'],\n \"x2\": bbox['bbox']['y1'],\n \"y2\": bbox['bbox']['y2'],\n \"difficult\": False\n }],\n \"image_id\": img_name,\n \"imageset\": part\n })\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n if class_name not in class_mapping:\n class_mapping[class_name] = len(class_mapping)\n\n # visualise bounding boxes\n if visualise:\n img = cv2.imread(annotation_data['filepath'])\n for bbox in annotation_data['bboxes']:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 0, 255))\n cv2.imshow('img', img)\n print(annotation_data['imageset'])\n cv2.waitKey(0)\n\n return all_imgs, classes_count, class_mapping", "def _load_all(self, anno_file, shuffle):\n image_set_index = []\n labels = []\n coco = COCO(anno_file)\n img_ids = coco.getImgIds()\n #print(img_ids)\n cars=[3,6,8]\n pedestrians=[1]\n cyclists=[2,4]\n lights=[10]\n signs=[13]\n\n apex_categories=cars+pedestrians+cyclists+lights+signs\n cnt=0\n humanonly=0\n human_count=0\n\n for img_id in img_ids:\n relevant=False\n # filename\n image_info = coco.loadImgs(img_id)[0]\n filename = image_info[\"file_name\"]\n #print(filename)\n #subdir = filename.split('_')[1]\n height = image_info[\"height\"]\n width = image_info[\"width\"]\n # label\n anno_ids = coco.getAnnIds(imgIds=img_id)\n annos = coco.loadAnns(anno_ids)\n label = []\n\n #print(\"listing categories for filename: \"+filename)\n\n hashumans=False\n for anno in annos:\n cat_id = int(anno[\"category_id\"])\n if(cat_id in apex_categories):\n cat_reduced= 0 if (cat_id in cars) else 1 if(cat_id in pedestrians) else 2 if(cat_id in cyclists) else 3 if(cat_id in lights) else 4\n bbox = anno[\"bbox\"]\n assert len(bbox) == 4\n xmin = float(bbox[0]) / width\n ymin = float(bbox[1]) / height\n xmax = xmin + float(bbox[2]) / width\n ymax = ymin + float(bbox[3]) / height\n label.append([cat_reduced, xmin, ymin, xmax, ymax, 0])\n #print(\"category: %d\"%cat_reduced)\n if (cat_id in pedestrians):\n hashumans=True\n if(cat_id not in pedestrians): #at least one non-person object is necessary\n relevant=True\n\n if(label and not relevant):\n humanonly+=1\n if label and relevant:\n if(hashumans):\n human_count+=1\n #print(\"adding \"+filename)\n labels.append(np.array(label))\n image_set_index.append(os.path.join(self.set, filename))\n cnt+=1\n print(\"added %d images\"%cnt)\n print(\"%d images has only humans\"%humanonly)\n print(\"%d registered images has humans\"%human_count)\n\n if shuffle:\n import random\n indices = range(len(image_set_index))\n random.shuffle(indices)\n image_set_index = [image_set_index[i] for i in indices]\n labels = 
[labels[i] for i in indices]\n # store the results\n self.image_set_index = image_set_index\n self.labels = labels", "def get_category_mapping_from_coco_file(coco_file_path: str) -> dict:\n # check if coco file is valid and read it\n (coco_dict, response) = read_and_validate_coco_annotation(coco_file_path)\n\n # raise error if coco file is not valid\n if not (response):\n raise TypeError\n\n coco_categories = coco_dict[\"categories\"]\n category_mapping = {\n str(coco_category[\"id\"]): coco_category[\"name\"]\n for coco_category in coco_categories\n }\n return category_mapping", "def detect_image_objects(gray, detect_params, detect_type=\"all\", label=-1, verbose=False):\n if detect_type == \"all\":\n detected_rects = detect_all_objects(gray, verbose=verbose, **detect_params)\n elif detect_type == \"primary\":\n detected_rects = detect_primary_objects(gray, verbose=verbose, **detect_params)\n else:\n print(f\"Unrecongized input value for detect_type, {detect_type}, so no objects were detected!\")\n print(\"Please provide a string value for detect_type of either 1) 'all' or 2) 'primary'\")\n detected_rects = None\n if isinstance(detected_rects, np.ndarray):\n features_labels = get_detected_features_labels(gray, detected_rects, label=label, verbose=verbose)\n return features_labels", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = 
vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def detect_all_objects(gray, haar_file, params, verbose=False):\n # Not the most performant to load haar_cascade for each image when params aren't changing...\n haar_cascade = cv.CascadeClassifier(haar_file)\n detected_objects = haar_cascade.detectMultiScale(gray, **params)\n if verbose:\n print(f\"# of Objects Detected = {len(detected_objects)}\")\n return detected_objects", "def masks_to_coco(\n image_dir: Path,\n mask_dir: Path,\n color_to_category_map: Mapping[str, str],\n image_to_mask_pattern=None,\n):\n if not image_dir.is_dir():\n raise NotADirectoryError(f\"Not a directory: {image_dir}\")\n if not mask_dir.is_dir():\n raise NotADirectoryError(f\"Not a directory: {mask_dir}\")\n if image_to_mask_pattern is None:\n image_to_mask_pattern = r\"$.*\\.(?=[^.]+$)\"\n\n categories = []\n color_to_idx_map = {}\n for i, (color, name) in enumerate(color_to_category_map.items()):\n categories.append({\"id\": i, \"name\": name})\n color_to_idx_map[color.lower()] = i\n\n def _image_name_to_mask(name):\n try:\n return re.search(image_to_mask_pattern, name).group() + \".png\"\n except AttributeError:\n raise ValueError(\n f\"Could not extract mask filename from {name} \"\n f\"using pattern {repr(image_to_mask_pattern)}\"\n )\n\n images = []\n annotations = []\n colors_not_found = set()\n categories_found = set()\n for path in sorted(image_dir.glob(\"*.*\")):\n if path.name.startswith(\".\"):\n # real glob excludes these\n continue\n mask_path = mask_dir / (_image_name_to_mask(path.name))\n if not mask_path.exists():\n raise FileNotFoundError(\n f\"No mask found at {mask_path} for image named {path.name}.\"\n )\n dims = get_image_dimensions(mask_path)\n if get_image_dimensions(path) != dims:\n raise ValueError(\n f\"Got inconsistent dimensions for image \"\n f\"({get_image_dimensions(path)}) and mask ({dims})\"\n )\n\n segmentations = generate_segmentations(\n mask_path, color_to_idx_map, colors_not_found\n )\n for rle, cat_idx in segmentations:\n categories_found.add(cat_idx)\n bbox = list(map(int, pycocotools.mask.toBbox(rle)))\n annotation = {\n \"id\": len(annotations),\n \"image_id\": len(images),\n \"category_id\": cat_idx,\n \"segmentation\": rle,\n # \"is_crowd\": 0, # TODO: how should we define this?\n \"bbox\": bbox,\n }\n annotations.append(annotation)\n\n images.append({\"id\": len(images), \"file_name\": path.name, **dims})\n\n if not images:\n raise ValueError(f\"No images found in {image_dir}\")\n\n if len(colors_not_found) > 1:\n raise ValueError(\n f\"Expected at most one color to not be mapped to a category. \"\n f\"Got {len(colors_not_found)}: {', '.join(f'#{x}' for x in sorted(colors_not_found))}.\"\n )\n if len(categories_found) != len(categories):\n missing_category_names = {\n cat[\"name\"] for cat in categories if cat[\"id\"] not in categories_found\n }\n missing_category_colors = {\n color: name\n for color, name in color_to_category_map.items()\n if name in missing_category_names\n }\n\n warnings.warn(\n f\"{len(categories)} categories defined, but only \"\n f\"{len(categories_found)} of these are present in masks. 
\"\n f\"These categories were not found: {missing_category_colors}\"\n )\n\n out = {\n \"images\": images,\n \"annotations\": annotations,\n \"info\": {},\n \"categories\": categories,\n }\n\n return out", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def get_object_detections(self):\n detections = self.__get_cropped_detections(self.image)\n return detections", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = 
box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def detect_objects(self, image):\n # Feed the input image to the model\n self.set_input_tensor(image)\n self.model.invoke()\n\n # Get all outputs from the model\n boxes = self.get_output_tensor(0)\n classes = self.get_output_tensor(1)\n scores = self.get_output_tensor(2)\n count = int(self.get_output_tensor(3))\n\n results = []\n for i in range(count):\n result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n results.append(result)\n return results", "def detectObjects(image):\n\tgrayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)\n\tcvCvtColor(image, grayscale, CV_BGR2GRAY)\n\n\tstorage = cvCreateMemStorage(0)\n\tcvClearMemStorage(storage)\n\tcvEqualizeHist(grayscale, grayscale)\n\tcascade = cvLoadHaarClassifierCascade(\n\t\t'/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml',\n\t\tcvSize(1,1))\n\n\tscalefactor = 1.1 # How much to increase window size each pass\n\tminsize = 50 # Smallest face to detect. 
Up this if you have small falsepositives\n\tfaces = cvHaarDetectObjects(grayscale, cascade, storage, scalefactor, 50,\n\t\t\t\tCV_HAAR_DO_CANNY_PRUNING, cvSize(minsize, minsize))\n\n\treturn [(f.x, f.y, f.x + f.width, f.y + f.height) for f in faces]", "def decode_coco_detection_example(example, input_range=None):\n image = tf.image.convert_image_dtype(example['image'], dtype=tf.float32)\n\n ### normalize\n if input_range:\n image = image * (input_range[1] - input_range[0]) + input_range[0]\n else:\n mean_rgb = tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=tf.float32)\n std_rgb = tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=tf.float32)\n image = (image - mean_rgb) / std_rgb\n\n boxes = decode_boxes(example['objects']['bbox'], tf.shape(image)[0:2])\n\n target = {\n 'area': example['objects']['area'],\n 'boxes': boxes,\n 'objects/id': example['objects']['id'],\n 'is_crowd': example['objects']['is_crowd'],\n 'labels': example['objects']['label'] + 1, # 0'th class is padding.\n }\n\n # Filters objects to exclude degenerate boxes.\n keep = tf.where(tf.logical_and(boxes[:, 2] > boxes[:, 0],\n boxes[:, 3] > boxes[:, 1]))[:, 0]\n target_kept = {k: tf.gather(v, keep) for k, v in target.items()}\n target_kept = target\n\n target_kept['orig_size'] = tf.cast(tf.shape(image)[0:2], dtype=tf.int32)\n target_kept['size'] = tf.identity(target_kept['orig_size'])\n target_kept['image/id'] = example['image/id']\n\n return {\n 'inputs': image,\n 'label': target_kept,\n }", "def get_convos():\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(b' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(b', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos", "def merge_coco_annotations(anno_file_list, save_dir, pre_fixs, keep_cls=13):\n annos = mmcv.load(anno_file_list[0])\n categories =[info for info in annos['categories'] if info['id'] == keep_cls]\n info = annos['info']\n licenses = annos['licenses']\n del annos\n\n new_coco_annotations = {}\n new_annotations = []\n new_images = []\n image_idx_offset = 0\n anno_idx_offset = 0\n for file_i, file_path in tqdm(enumerate(anno_file_list), total=len(anno_file_list)):\n annos = mmcv.load(file_path)\n cur_images = annos['images']\n cur_annotations = annos['annotations']\n for img_info in cur_images: # loop through image_infos\n img_info['id'] += image_idx_offset\n img_info['file_name'] = os.path.join(pre_fixs[file_i], img_info['file_name'])\n new_images.append(img_info)\n for anno_info in cur_annotations: # loop through boxes\n anno_info['id'] += anno_idx_offset\n anno_info['image_id'] += image_idx_offset\n cat_id = keep_cls if anno_info['category_id'] == keep_cls else 15\n anno_info['category_id'] = cat_id\n new_annotations.append(anno_info)\n\n image_idx_offset += len(cur_images)\n anno_idx_offset += len(cur_annotations)\n\n new_coco_annotations['info'] = info\n new_coco_annotations['licenses'] = licenses\n new_coco_annotations['categories'] = categories\n new_coco_annotations['annotations'] = new_annotations\n new_coco_annotations['images'] = new_images\n\n print('convert done\\nsaving coco annotations')\n mmcv.dump(new_coco_annotations, save_dir)\n print('all done!')", "def process_image(file_path):\n img_array = io.imread(file_path)\n detections, shapes, descriptors = detect_faces(person_database,img_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n 
names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def get_coco_gt(self, coco, img_id, height, width, img_name):\n annIds = coco.getAnnIds(imgIds=[img_id], iscrowd=None)\n # assert annIds is not None and annIds > 0, 'No annotation for %s' % str(img_id)\n anns = coco.loadAnns(annIds)\n # assert len(anns) > 0, 'No annotation for %s' % str(img_id)\n masks = []\n classes = []\n bboxes = []\n\n for ann in anns:\n id = cat_id_to_real_id(ann['category_id'])\n # id = coco_cat_id_to_voc_id(ann['category_id'])\n if id != 0:\n classes.append(id)\n\n m = coco.annToMask(ann) # {0, 1} mask\n assert m.shape[0] == height and m.shape[1] == width, \\\n 'image %s and ann %s don''t match' % (img_id, ann)\n masks.append(m)\n\n bboxes.append(ann['bbox'])\n\n masks = np.asarray(masks)\n classes = np.asarray(classes)\n bboxes = np.asarray(bboxes)\n\n # to x1, y1, x2, y2\n num_classes = bboxes.shape[0]\n if num_classes <= 0:\n bboxes = np.zeros([0, 4], dtype=np.float32)\n classes = np.zeros([0], dtype=np.float32)\n num_classes = 0\n print('None Annotations %s' % img_name)\n bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]\n bboxes[:, 3] = bboxes[:, 0] + bboxes[:, 3]\n\n bboxes = bboxes.astype(np.float32)\n classes = classes.astype(np.float32)\n masks = masks.astype(np.uint8)\n assert masks.shape[0] == bboxes.shape[0], 'Shape Error'\n\n return num_classes, masks, bboxes, classes", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])" ]
[ "0.6292812", "0.62888265", "0.602774", "0.5988227", "0.5976888", "0.5939347", "0.5826487", "0.58208746", "0.5818549", "0.58161163", "0.5813985", "0.57310075", "0.5711495", "0.56712824", "0.56664807", "0.5657252", "0.5646004", "0.5624283", "0.56121266", "0.5607481", "0.5555343", "0.555021", "0.55448014", "0.55428195", "0.5525682", "0.551334", "0.55084574", "0.5502459", "0.54788405", "0.54630345" ]
0.80364597
0