Dataset columns:

query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (30 items per row)
negative_scores: list (30 items per row)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
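For orientation, a minimal loading sketch for a dataset with this schema using the Hugging Face datasets library; the repository id below is a hypothetical placeholder, since the actual hosting location is not given here.

from datasets import load_dataset

# "org/code-retrieval-triplets" is a placeholder id, not the real repository name.
ds = load_dataset("org/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"])           # natural-language query / docstring
print(len(row["negatives"]))  # 30 hard negatives per row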
Find the desired state given a schedule
def _get_desired_state(schedule):
    current_hour = int(time.strftime("%H", time.gmtime()))
    current_week_day = time.strftime("%A", time.gmtime()).lower()
    start = schedule[current_week_day]['start']
    stop = schedule[current_week_day]['stop']
    state = 'stop'
    if current_hour >= start and current_hour < stop:
        state = 'start'
    return state
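A small usage sketch for the function above; the per-weekday schedule shape ('start'/'stop' hours) is inferred from the dictionary lookups in the snippet, and the time import it relies on is assumed to exist in the original module.

import time  # assumed import of the original module

schedule = {
    day: {"start": 8, "stop": 18}
    for day in ("monday", "tuesday", "wednesday", "thursday",
                "friday", "saturday", "sunday")
}
# Returns 'start' between 08:00 and 18:00 UTC, otherwise 'stop'.
print(_get_desired_state(schedule))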
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_current_state(self):\n schedule = parser.parse_schedule(SCHEDULE)\n self.assertEqual(\"a\", schedule.get_current_state(dt(monday, '09:30')))\n self.assertEqual(\"c\", schedule.get_current_state(dt(monday, '08:30')))\n self.assertIsNone(schedule.get_current_state(dt(saturday, '09:30'),\n lookback=False))", "def schedule_monitor(schedule):\n if schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True", "def test_retrieve_instances_schedule_state(self):\n pass", "def search(state=None, seconds=60, opening_look_up=True, end_game_look_up=False):\n assert isinstance(state, chess.Board)\n if state.is_game_over():\n return None, state\n manager = time_management.TimeManager(time_control=time_management.Clock(base_time=seconds))\n manager.allocate_time = lambda: seconds\n search_copy = state.copy()\n manager.perform_search(board=search_copy,\n look_up_in_opening=opening_look_up,\n look_up_in_end_game=end_game_look_up)\n copy = state.copy()\n copy.push(manager.decision)\n return manager.decision, copy", "def state(self):\n # The default state is for a course that has no course runs\n best_state = CourseState(CourseState.TO_BE_SCHEDULED)\n\n for course_run in self.course_runs.only(\n \"start\", \"end\", \"enrollment_start\", \"enrollment_end\"\n ):\n state = course_run.state\n if state < best_state:\n best_state = state\n if state[\"priority\"] == CourseState.ONGOING_OPEN:\n # We found the best state, don't waste more time\n break\n return best_state", "def test_looking_back_to_prior_state(self):\n schedule = parser.parse_schedule(SCHEDULE)\n self.assertIsNone(schedule.get_latest_event(\n dt(saturday, '09:30'), lookback=False))\n self.assertEqual((dt(friday, '11:00'), \"c\"),\n schedule.get_latest_event(dt(saturday, '09:30')))", "def find_state(state, dbsession):\n\n\treturn dbsession.query(db.State).filter_by(state_abbreviation=state).first()", "def solve(self, state, times):", "def get_winner(state):\n\n if", "def next_state_func(self, state, action, Time_matrix):\n next_state = []\n \n # Initialize various times\n total_time = 0\n pickup_time = 0 # time from current location to pickup location\n waiting_time = 0 # time if driver to refuse all requests\n drop_time = 0 # time from Pick-up point to drop point\n \n # getting the current location, time, day and request locations\n curr_loc = state[0]\n curr_time = state[1]\n curr_day = state[2]\n pickup_loc = action[0]\n drop_loc = action[1]\n \n # 1. driver refuse to requests\n # so wait time is 1 unit, next location is current location\n if ((pickup_loc== 0) and (drop_loc == 0)):\n waiting_time = 1\n next_loc = curr_loc\n \n # 2. cab is already at pick up point\n #if current cab position is same as pick up position\n elif (curr_loc == pickup_loc):\n drop_time = Time_matrix[curr_loc][drop_loc][curr_time][curr_day]\n \n # next location is the drop location\n next_loc = drop_loc\n # 3. 
cab is not at the pickup point\n else:\n # Driver is away to pickup point, he has to travel to pickup point first\n # time take to reach pickup point\n pickup_time = Time_matrix[curr_loc][pickup_loc][curr_time][curr_day]\n new_time, new_day = self.new_time_day(curr_time, curr_day, pickup_time)\n \n # we calculated pickup Time, now time taken to drop\n drop_time = Time_matrix[pickup_loc][drop_loc][new_time][new_day]\n next_loc = drop_loc\n\n # Calculate total time as sum of all durations\n total_time = (waiting_time + pickup_time + drop_time)\n next_time, next_day = self.new_time_day(curr_time, curr_day, total_time)\n \n # Construct next_state using the next_loc and the new time states.\n next_state = [next_loc, next_time, next_day]\n \n return next_state, waiting_time, pickup_time, drop_time", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def state(self, state):\n def what(s, switch):\n if switch:\n return s.label()\n else:\n return s\n switch = is_FSMState(state)\n\n try:\n return self._states_dict_[what(state, switch)]\n except AttributeError:\n for s in self.iter_states():\n if what(s, not switch) == state:\n return s\n except KeyError:\n pass\n raise LookupError(\"No state with label %s found.\" % (what(state, switch),))", "def find_by_status(self, host, state):", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_airplane_state(self, airplane_instance, chosen_time_and_date):\r\n\r\n chosen_airplane = airplane_instance\r\n voyages_list = self.ioAPI.load_all_voyages() # List of all voyages\r\n airplane_state = \"IDLE\" # initializes the airplane state at IDLE\r\n NOW = datetime.datetime.fromisoformat(chosen_time_and_date)\r\n\r\n for voyage in voyages_list:\r\n voyage_plane = voyage.get_plane_id()\r\n\r\n departure_out = datetime.datetime.fromisoformat(voyage.get_departure_out())\r\n arrival_out = datetime.datetime.fromisoformat(voyage.get_arrival_out())\r\n departure_home = datetime.datetime.fromisoformat(voyage.get_departure_home())\r\n arrival_home = datetime.datetime.fromisoformat(voyage.get_arrival_home())\r\n available = arrival_home + datetime.timedelta(hours = 1)\r\n \r\n if voyage_plane == chosen_airplane:\r\n if departure_out <= NOW and arrival_home > NOW:\r\n break \r\n else:\r\n return airplane_state\r\n \r\n if departure_out <= NOW and NOW <= arrival_out:\r\n airplane_state = \"Flight {} is on its way to {} and will be available again on: {}\".format(voyage.get_flight_number_out(), voyage.get_dest_id(), available)\r\n elif departure_home <= NOW and NOW <= arrival_home:\r\n airplane_state = \"Flight {} is on its way to KEF and will be available again on: {}\".format(voyage.get_flight_number_back(), available)\r\n elif arrival_out <= NOW and NOW <= departure_home:\r\n airplane_state = \"IN INTERMISSION\" \r\n\r\n return airplane_state", "def get_action(self, state):\n\n best_action = None\n best_value = -np.inf\n actions = [0, 1, 2, 3] # left, down, right, up\n for a in actions:\n row = state // self.edge\n col = state % self.edge\n # print (row, col)\n if a == 0:\n col = max(col-1, 0)\n elif a == 1:\n row = min(row+1, self.edge-1)\n elif a == 2:\n col = min(col+1, self.edge-1)\n elif a == 3:\n row = max(row-1, 0)\n # print (row, col)\n\n new_state = 
row * self.edge + col\n # print (new_state)\n if (self.values[new_state] > best_value or new_state == self.num_states-1): #goal\n best_value = 1.0 if new_state == self.num_states-1 else self.values[new_state]\n best_action = a\n return best_action", "def findAirport(state):\n if state == \"NSW\":\n airport = \"Sydney Airport\"\n elif state == \"VIC\":\n airport = \"Melbourne Airport\"\n elif state == \"QLD\":\n airport = \"Brisbane Airport\"\n elif state == \"TAS\":\n airport = \"Hobart Airport\"\n elif state == \"WA\":\n airport = \"Perth Airport\"\n elif state == \"SA\":\n airport = \"Adelaide Airport\"\n elif state == \"NT\":\n airport = \"Darwin Airport\"\n return airport", "def next_state_func(self, state, action, Time_matrix):\n curr_loc, curr_time, curr_day = state\n pickup_loc, drop_loc = action\n \n rewards = self.reward_func(state, action, Time_matrix)\n total_time = 0\n \n if action == (0,0):\n # update time by 1 hour\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, 1)\n next_state = (curr_loc, curr_time, curr_day)\n total_time = 1\n else:\n # time from curr_loc to reach pickup_loc\n t1 = int(Time_matrix[curr_loc][pickup_loc][curr_time][curr_day])\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, t1)\n \n # time from pickup_loc to reach drop_loc\n t2 = int(Time_matrix[pickup_loc][drop_loc][curr_time][curr_day])\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, t2)\n \n total_time = t1 + t2\n next_state = (drop_loc, curr_time, curr_day)\n \n return next_state, rewards, total_time", "def find_best_move(state: GameState) -> None:", "def actions(self, state):\n if (state == (3,3,1)): # if yes, send a missionary and a canniable to land B\n return (2,2,0)\n if (state == (2,2,0)): # if yes, send a missionary back to land A\n return (3,2,1)\n if (state == (3,2,1)): # if yes, send a missionary and a canniable to land B\n return (2,1,0)\n if (state == (2,1,0)): # if yes, send a missionary back to land A\n return (3,1,1)\n if (state == (3,1,1)): # if yes, send 2 missionary to land B\n return (1,1,0)\n if (state == (1,1,0)): # if yes, send a missionary and a canniable to land A\n return (2,2,1)\n if (state == (2,2,1)): # if yes, send 2 missionary to land B\n return (0,2,0)\n if (state == (0,2,0)): # if yes, send a missionary to land A\n return (1,2,1)\n if (state == (1,2,1)): # if yes, send a missionary and a canniable to land B\n return (0,1,0)\n if (state == (0,1,0)): # if yes, send a missionary to land A\n return (1,1,1)\n if (state == (1,1,1)): # if yes, send a missionary and a canniable to land B\n return (0,0,0)\n\n raise NotImplementedError", "def get_state(hass, utc_point_in_time, entity_id, run=None):\n states = get_states(hass, utc_point_in_time, (entity_id,), run)\n return states[0] if states else None", "def _get_selected_schedule(self, home_id: str) -> Dict:\n for value in self.schedules.get(home_id, {}).values():\n if \"selected\" in value.keys():\n return value\n\n return {}", "def get_action(self, state):\n self.visited = {}\n utility = -inf\n move = 'STOP'\n\n # We choose the successor with the maximum utility\n for successor in state.generatePacmanSuccessors():\n maxPlayer = True\n score = self.alphabeta(successor[0], -inf, +inf, maxPlayer)\n if utility < score:\n move = successor[1]\n utility = score\n\n # If there's no winning state, we try to to move farther from the ghost\n if utility == -inf:\n dist = -inf\n for successor in state.generatePacmanSuccessors():\n newDist = 
self.distanceFromGhost(successor[0])\n if not successor[0].isLose() and newDist > dist:\n move = successor[1]\n dist = newDist\n print(utility)\n return move", "def FindGroundstate(**args):\n\targs[\"imtime\"] = True\n\n\tprop = SetupProblem(**args)\n\n\tfor t in prop.Advance(10):\n\t\tE = prop.GetEnergy()\n\t\tprint \"t = %3.2f, E = %2.8f\" % (t, E)\n\n\treturn prop", "def next_state(self, debug=False):\n\n if self.current_state == 'NoObstacle':\n # First check if any obstacle is in sight\n if self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n elif self.transitions.obstacle_in_sight():\n self.current_state = 'Obstacle'\n\n elif self.current_state == 'Obstacle':\n # First check if obstacle is still in sight\n if self.transitions.no_obstacle_in_sight() and not self.transitions.obstacle_in_sight():\n self.current_state = 'NoObstacle'\n elif self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n\n elif self.current_state == 'RoomReached':\n self.current_state = 'InspectCorners'\n\n elif self.current_state == 'InspectCorners':\n if self.transitions.all_corners_inspected():\n if not self.transitions.all_rooms_visited():\n self.current_state = 'RotateToExit'\n else:\n self.current_state = 'Finished'\n\n elif self.current_state == 'RotateToExit':\n if self.transitions.aiming_to_carrot():\n self.current_state = 'NoObstacle'\n\n\n elif self.current_state == 'Finished':\n pass\n\n # DEBUG\n if debug:\n print 'Next state: %s' % self.current_state\n\n if self.current_state is not self.old_state:\n print self.current_state\n\n self.old_state = self.current_state\n\n return self.current_state", "def choose(self, state: State) -> State:", "def _get_msc_state_at_time(\n self,\n msc: Issue,\n msc_events: List[Tuple[IssueEvent, Optional[Label]]],\n dt: datetime,\n ) -> MSCState:\n # Iterate through MSC events and calculate the current state of the issue at a given\n # time\n # Initially assume it doesn't exist. 
Change the state as we iterate through events\n state = {\n \"prev_state\": None,\n \"state\": None,\n } # type: Dict[str, Optional[MSCState]]\n finished_fcp = False\n\n def update_state(new_state: MSCState):\n state[\"prev_state\"] = state[\"state\"]\n state[\"state\"] = new_state\n\n disposition_state = None\n is_closed = False\n has_label_merged = False\n rejected_or_abandoned = False\n for event, label in msc_events:\n if event.created_at > dt:\n # We've reached our datetime threshold\n break\n\n # Classify the event\n if label:\n label_name = label.name\n\n # This is a label event\n if label_name == \"proposal\":\n update_state(MSCState.NEW)\n elif label_name == \"final-comment-period\":\n update_state(MSCState.FCP)\n elif label_name == \"disposition-merge\":\n disposition_state = MSCState.MERGED\n elif label_name == \"disposition-close\":\n disposition_state = MSCState.CLOSED\n elif label_name == \"disposition-postpone\":\n disposition_state = MSCState.POSTPONED\n # Some issues have this silly label\n # i.e https://github.com/matrix-org/matrix-doc/issues/1466\n elif label_name == \"merged\":\n update_state(MSCState.MERGED)\n has_label_merged = True\n elif label_name == \"finished-final-comment-period\":\n # Prevent issues which have finished FCP but associated PRs have not\n # merged yet to not get stuck in FCP state forever.\n # i.e https://github.com/matrix-org/matrix-doc/issues/1219\n update_state(\n disposition_state if disposition_state else MSCState.NEW\n )\n finished_fcp = True\n elif label_name == \"abandoned\" or label_name == \"rejected\":\n update_state(MSCState.CLOSED)\n elif event.event == \"reopened\":\n # TODO: What does mscbot-python do in this case? New or previous state?\n update_state(state[\"prev_state\"])\n is_closed = False\n elif event.event == \"closed\":\n # The MSC was closed\n if msc.pull_request:\n if state != MSCState.MERGED:\n update_state(MSCState.CLOSED)\n # Issues that are closed count as closed MSCs\n else:\n if has_label_merged:\n update_state(MSCState.MERGED)\n else:\n update_state(MSCState.CLOSED)\n elif event.event == \"merged\":\n # The MSC was merged\n if finished_fcp:\n update_state(MSCState.MERGED)\n\n if is_closed and rejected_or_abandoned:\n update_state(MSCState.CLOSED)\n\n return state[\"state\"]", "def get_state(self, state_description, dt, class_=State):\n dt = pd.Timestamp(dt)\n names = state_description.keys()\n name_value_mapping = {name: self.get(name, dt) for name in names}\n return state_description.to_state(class_, **name_value_mapping)" ]
[ "0.71231353", "0.6688169", "0.6462357", "0.6322983", "0.59492326", "0.5888927", "0.5873696", "0.584331", "0.57127213", "0.5705844", "0.5693119", "0.5692573", "0.56763047", "0.5673177", "0.5662666", "0.5662666", "0.5624019", "0.5621992", "0.5611431", "0.5609096", "0.55962247", "0.55834854", "0.5579871", "0.55749446", "0.5569519", "0.55644256", "0.5557359", "0.5554417", "0.55512863", "0.55381113" ]
0.80120367
0
ELB stops sending health checks after an instance is stopped and started. This method re-registers the instances in the profile's ELBs so that health checks are sent again.
def reregister_elb_instances(profile):
    if 'elb_names' in profile:
        conn = elb_conn[profile['region']]
        elbs = conn.get_all_load_balancers(profile['elb_names'])
        for elb in elbs:
            instance_ids = _get_instance_ids(elb.instances)
            print "Reregistering " + elb.name + " instances."
            try:
                conn.deregister_instances(elb.name, instance_ids)
                conn.register_instances(elb.name, instance_ids)
            except Exception, e:
                print elb.name + "has no instances."
            # to avoid elb rate limit throttling
            time.sleep(1)
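For context, a hypothetical profile entry consistent with the keys read by this snippet and by the scheduling helpers among the negatives; the region, ELB name, and tag values are illustrative only.

profile = {
    "region": "us-east-1",                # looked up via elb_conn[profile['region']]
    "elb_names": ["example-elb"],         # ELBs whose instances are re-registered
    "instance_tags": "example-app-*",     # tag filter used when collecting instances
    "schedule": {"monday": {"start": 8, "stop": 18}},  # per-weekday start/stop hours
}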
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schedule():\n for profile in schedules['profiles']:\n instances = _get_instances(profile['instance_tags'], profile['region'])\n start_stop_instances(instances, profile['schedule'])\n reregister_elb_instances(profile)", "def stop(self):\n if not self._started:\n return\n\n try:\n self._unregister()\n except Exception:\n logger.exception(\"Failed to unregister profile.\")\n self._started = False", "def deregister_instance(InstanceId=None):\n pass", "def teardown(self):\n # Only terminate if instance is running\n if self.instance:\n instance_status = aws.check_instance_status(self.config, self.instance.id)\n if instance_status == \"running\":\n aws.terminate_ec2_instance(self.config, self.instance.id)\n super().teardown()", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def finalizer():\n stopping_instances = {\n key: val for key, val in ec2_instances.items() if (\n aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING\n )\n }\n if stopping_instances:\n for stopping_instance in stopping_instances:\n instance = aws_obj.get_ec2_instance(stopping_instance.key())\n instance.wait_until_stopped()\n stopped_instances = {\n key: val for key, val in ec2_instances.items() if (\n aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED\n )\n }\n if stopped_instances:\n aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)", "def deregister_elb_instances(elbclient, elbname, instance_ids):\r\n Instances = list(map(\r\n lambda x: {'InstanceId': x},\r\n instance_ids\r\n ))\r\n try:\r\n elbclient.deregister_instances_from_load_balancer(\r\n LoadBalancerName=elbname,\r\n Instances=Instances,\r\n DryRun=True\r\n )\r\n except Exception as ex:\r\n print(ex.message)\r\n return False\r\n return True", "def unregister():\n ip.events.unregister('post_run_cell', help_with_exception)", "def stop_monitoring(self):\n pass", "def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()", "def _unregister(self):\n try:\n self._profilemgr_proxy.proxy.UnregisterProfile(\n HFP_DBUS_PROFILE_ENDPOINT)\n logger.debug(\"Unregistered HFP profile.\")\n except Exception:\n logger.exception(\"Error unregistering profile endpoint.\")\n\n self._profile = None", "def consul_deregister(self):\n try:\n if self.svc_name not in self.consul.agent.services():\n return\n self.log.info(\"consul-deregister\")\n self.consul.agent.service.deregister(\"qemu-{}\".format(self.name))\n except requests.exceptions.ConnectionError:\n pass\n except Exception:\n self.log.exception(\"consul-deregister-failed\", exc_info=True)", "def resource_cleanup(self):\n for lb in self.loadbalancers:\n self.octavia_client.load_balancer_delete(lb['id'], cascade=True)\n try:\n self.wait_for_lb_resource(\n self.octavia_client.load_balancer_show, lb['id'],\n provisioning_status='DELETED')\n except osc_lib.exceptions.NotFound:\n pass\n for fip in self.fips:\n self.neutron_client.delete_floatingip(fip)\n # we run the parent resource_cleanup last as it will remove instances\n # referenced as members in the above cleaned up load balancers\n super(LBAASv2Test, self).resource_cleanup()", "def teardown(self) -> None:\n self._unregister_service()\n self._unregister_agent()", "def remove_test_instance(instance_name):\n nginx_name = '/etc/nginx/sites-enabled/%s' % instance_name\n if exists(nginx_name):\n sudo('nxdissite %s' % instance_name)\n sudo('/etc/init.d/nginx reload')\n nginx_name = '/etc/nginx/sites-available/%s' % instance_name\n if 
exists(nginx_name):\n sudo('rm %s' % nginx_name)\n\n upstart_link = \"/etc/init/%s.conf\" % instance_name\n if exists(upstart_link):\n sudo('stop %s' % instance_name)\n sudo('rm %s' % upstart_link)\n sudo('initctl reload-configuration')\n\n instance_dir = env.site_root + instance_name\n if exists(instance_dir):\n run('rm -Rf %s' % instance_dir)", "def destroy(self):\n if hasattr(self, 'vistrailsStartup'):\n self.vistrailsStartup.destroy()", "def stop(self):\n if self.profile:\n self.profiler.disable()\n self.stopped = time.time()\n super(MockApplication, self).stop()", "def destroy(self):\n if not self.status:\n self.class_logger.info(\"Skip iperf tg id:{0}({1}) destroying because \"\n \"it's has already Off status.\".format(self.id, self.name))\n return\n self.stop()\n\n self.sanitize()", "def _stopping(self, sender, **kwargs):\n for v in self._platform_connections.values():\n v.kill()\n\n self._platform_connections.clear()\n\n self.vip.rpc.call(MASTER_WEB, 'unregister_all_agent_routes',\n self.core.identity).get(timeout=30)", "def shutdown_instances(self):\r\n self.min_size = 0\r\n self.max_size = 0\r\n self.desired_capacity = 0\r\n self.update()", "def cleanup_resources(self, restart=False):", "def restart_elb_instances ( ec2_conn, elb_conn, elb, restart_smoothly = True ) :\n instances_restarted = True\n for elb_instance in elb.instances :\n if restart_smoothly :\n elb_conn.deregister_instances( elb.name, [ elb_instance.id ] )\n\n instance = find_instance_by_id( ec2_conn, elb_instance.id )\n print \"Restarting instance with id: \" + elb_instance.id\n instance.reboot( )\n if restart_smoothly :\n running = wait_on_object_state( instance, 'running' )\n if not running :\n print \"WARNING: instance still not up after long period of time!\"\n\n elb_conn.register_instances( elb.name, [ elb_instance.id ] )\n inservice = wait_on_elb_instance( elb_conn, elb.name, elb_instance.id )\n if not inservice :\n print \"WARNING: instance still not in service with ELB after long period of time!\"\n instances_restarted = False\n\n return instances_restarted", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def stop(self):\n self.logger.debug(\"Plugin '{}': stop method called\".format(self.get_fullname()))\n self.scheduler_remove('check_login')\n self.alive = False", "def check_elb_instance_health(elb_name, instances):\n if_verbose(\"Checking ELB %s instance health for %s\" % (elb_name, instances))\n timer = time.time()\n while (True):\n if_verbose(\"Sleeping for %d ELB instance health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired. 
A manual clean up is likely.\"\n\n healthy_elb_instances = 0\n elb_instances = elb.describe_instance_health(LoadBalancerName=elb_name, Instances=instances)\n for instance in elb_instances[\"InstanceStates\"]:\n if_verbose(\"Progress of ELB instance %s: %s\" % (instance[\"InstanceId\"], instance[\"State\"]))\n\n if instance[\"State\"] == \"InService\":\n healthy_elb_instances += 1\n\n if healthy_elb_instances == len(instances):\n break\n else:\n healthy_elb_instances = 0\n\n if_verbose(\"ELB %s is healthy with instances %s\" % (elb_name, elb_instances))\n return None", "def power_off(self):\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.power_off(manager=self.manager)\n except:\n self.logger.error(\"Error with VM '%s'\" % vm.name)\n raise", "def test_ungraceful_shutdown_aws(self, resources, instances, aws_obj, force):\n aws_obj.stop_ec2_instances(instances=instances, wait=True, force=force)\n aws_obj.start_ec2_instances(instances=instances, wait=True)\n self.validate_cluster(resources, instances)", "def deregister_instances(self, instances):\r\n if isinstance(instances, str) or isinstance(instances, unicode):\r\n instances = [instances]\r\n new_instances = self.connection.deregister_instances(self.name, instances)\r\n self.instances = new_instances", "def stop():\n app = get_vistrails_application()\n app.finishSession()\n app.save_configuration()\n app.destroy()", "def REBshutdown(self):\n pass" ]
[ "0.6334031", "0.62459713", "0.6142694", "0.5938756", "0.5917088", "0.57516193", "0.5744347", "0.5738114", "0.5633704", "0.5574055", "0.5539278", "0.5505054", "0.5477789", "0.5458649", "0.5433622", "0.54268354", "0.54172254", "0.54146117", "0.5390834", "0.5369521", "0.5361009", "0.5341814", "0.532118", "0.53177524", "0.53096503", "0.52956736", "0.5291928", "0.52918756", "0.5261415", "0.5220176" ]
0.78677464
0
Given an array of boto.ec2 instances, returns their instance IDs.
def _get_instance_ids(instances):
    instance_ids = []
    for instance in instances:
        instance_ids.append(instance.id)
    return instance_ids
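A quick usage sketch; real boto.ec2 instance objects are replaced with a simple stand-in here, since only the .id attribute is accessed.

from types import SimpleNamespace

fake_instances = [SimpleNamespace(id="i-0abc123"), SimpleNamespace(id="i-0def456")]
print(_get_instance_ids(fake_instances))  # ['i-0abc123', 'i-0def456']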
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ids(self, instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def get_elb_instance_ids(elbclient, elbname):\r\n try:\r\n resp = elbclient.describe_load_balancers(LoadBalancerNames=[elbname])\r\n except:\r\n print(ex.message)\r\n return None\r\n return list(map(\r\n lambda x:x['InstanceId'],\r\n resp['LoadBalancerDescriptions'][0]['Instances']\r\n ))", "def fetch_instances(self, ids):\n result = []\n self.log.info(f\"fetch '{len(ids)}' instances\")\n self.log.debug(f\"fetch instance data for ids '{ids}'\")\n try:\n response = self.client.describe_instances(\n InstanceIds=ids\n )\n if 'HTTPStatusCode' in response['ResponseMetadata'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:\n pass\n else:\n raise Exception(f'not able to fetch instacnes with ids: {ids}')\n if len(response['Reservations'][0]['Instances']) == 0:\n raise Exception(f'should retrun at least single insatance data')\n result = []\n for reservation in response[\"Reservations\"]:\n for el in reservation[\"Instances\"]:\n ec2 = EC2Instance.factory(el)\n if ec2.state:\n result.append(ec2)\n else:\n self.log.warn(f'instance \"{ec2.id}\" excluded')\n except Exception as e:\n raise Exception(f'exception when trying to fetch instance data {ids}')\n return sorted(list(result), key=lambda instance: instance.launch_time)", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", 
"def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def get_asg_instance_ids(self, asg_name):\n instance_ids = []\n # Grab the first item in the list because we're only asking for 1 ASG\n try:\n asg_data = self.asg.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]\n except Exception as e: \n logger.info(e)\n return []\n\n for instance_data in asg_data['Instances']:\n instance_ids.append(instance_data['InstanceId'])\n\n return instance_ids", "def list_instance_uuids(self):\n return self.list_instances()", "def source_instance_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_instance_ids\")", "def terminate_instance(self, instance_ids):\n instances_terminated = []\n if (len(instance_ids) > 0):\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n for instance_id in instance_ids:\n try:\n instance = euca_conn.terminate_instances(instance_id.encode(\"latin-1\"))\n instances_terminated.append(instance)\n except Exception, ex:\n self.euca.display_error_and_exit('%s' % ex)\n\n return instances_terminated\n else:\n return False", "def start_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('StartInstances', params,\r\n [('item', Instance)], verb='POST')", "def startstop_instances(module, ecs, instance_ids, state, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n # Fail unless the user defined instance tags\n if not instance_tags:\n 
module.fail_json(msg='instance_ids should be a list of instances, aborting')\n\n # To make an ECS tag filter, we need to prepend 'tag:' to each key.\n # An empty filter does no filtering, so it's safe to pass it to the\n # get_all_instances method even if the user did not specify instance_tags\n filters = []\n if instance_tags:\n for inst_tag in instance_tags:\n tag = {}\n tag[\"tag:\" + inst_tag['tag_key']] = inst_tag['tag_value']\n filters.append(tag)\n # Check (and eventually change) instances attributes and instances state\n running_instances_array = []\n region, connect_args = get_acs_connection_info(module)\n connect_args['force'] = module.params.get('force', None)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state != state:\n instance_dict_array.append(get_instance_info(inst))\n try:\n if state == 'running':\n inst.start()\n elif state == 'restarted':\n inst.reboot()\n else:\n inst.stop()\n except ECSResponseError as e:\n module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, instance_ids)", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def source_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_instance_ids\")", "def source_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_instance_ids\")", "def terminate_instances(module, ecs, instance_ids, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n module.fail_json(msg='instance_ids should be a list of instances, aborting')\n filters = {}\n if instance_tags:\n for key, value in 
instance_tags.items():\n filters[\"tag:\" + key] = value\n\n terminated_instance_ids = []\n region, connect_args = get_acs_connection_info(module)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state == 'absent':\n terminated_instance_ids.append(inst.id)\n instance_dict_array.append(get_instance_info(inst))\n try:\n inst.terminate(**connect_args)\n except ECSResponseError as e:\n module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, terminated_instance_ids)", "def get_instances(instance_ids: np.ndarray, class_ids: np.ndarray,\n class_labels: List[str], id2label: Dict) -> Dict:\n assert len(class_labels) == len(class_ids)\n instances = {}\n for label in class_labels:\n instances[label] = []\n # traverse all instances\n inst_ids = np.unique(instance_ids)\n for id in inst_ids:\n # skip 0 and negative instance id (background points)\n if id <= 0:\n continue\n # get instance\n inst = VertInstance(instance_ids, id)\n # record in correspond class dict\n if inst.label_id in class_ids:\n instances[id2label[inst.label_id]].append(inst.dict)\n return instances", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i 
for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []" ]
[ "0.79161435", "0.7302014", "0.6808307", "0.67969036", "0.6750258", "0.6715816", "0.64550406", "0.64538854", "0.63021296", "0.6263368", "0.6237591", "0.62266445", "0.62067336", "0.6107761", "0.60811573", "0.6030414", "0.60244936", "0.6010169", "0.59973186", "0.5986456", "0.5975571", "0.59570324", "0.5946288", "0.5900756", "0.5900756", "0.58712804", "0.5867998", "0.5852129", "0.5841316", "0.5840723" ]
0.83387077
0
Write a function that returns True if the point (x, y) lies inside the circle of radius r centered at (0, 0)
def dans_cercle(self, r, x, y):
    self.r_num(r)
    valid = (isinstance(x, int) or isinstance(x, float)) and \
            (isinstance(y, int) or isinstance(y, float))
    if valid:
        if sqrt(x**2 + y**2) < self.r:
            return True
        else:
            return False
    else:
        raise TypeError
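A standalone sketch of the same check, for illustration; the class context, the r_num validator, and the self.r attribute used above are assumed, so the free function below only mirrors the geometric test.

from math import sqrt

def inside_circle(r, x, y):
    # True when (x, y) lies strictly inside the circle of radius r centered at the origin.
    if not all(isinstance(v, (int, float)) for v in (x, y)):
        raise TypeError
    return sqrt(x**2 + y**2) < r

print(inside_circle(1.0, 0.5, 0.5))  # True
print(inside_circle(1.0, 1.0, 1.0))  # False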
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def colisiona(self, r, p):\n # Esta en el eje de las x?\n if p[0] >= r[0] and p[0] <= r[0] + 10:\n # Esta en el eje de las y?\n if p[1] >= r[1] and p[1] <= r[1] + 5:\n return True\n else:\n return False\n else:\n return False", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def check_in(x, y, R=Re):\n r = np.sqrt(x ** 2 + y ** 2)\n return r <= R", "def circles_overlapping(x1, y1, x2, y2, r):\n # print(abs((x2-x1)**2 + (y2-y1)**2))\n # print((2*r)**2)\n if (abs((x2-x1)**2 + (y2-y1)**2) > (2*r)**2):\n return False\n else: return True", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def is_point_inside_hypercube(point: List[float], c: List[float], r: float) -> bool:\n diff = np.subtract(point, c)\n return np.all(np.absolute(diff) <= r)", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def is_center(self):\n if self.pupils_located:\n return self.is_right() is not True and self.is_left() is not True", "def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2", "def is_in_collision_point(self, pos):\n x, y = pos\n return sqrt((self.x - x)**2 + (self.y - y)**2) < self.r", "def is_point_inside_hypermoon(point: np.array, c: Tuple[List[float]], r: Tuple[float]) -> bool:\n return is_point_inside_hypersphere(point, c[0], r[0]) and not is_point_inside_hypersphere(point, c[1], r[1])", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def continuous(self, x, y, X, Y):\n hor = fabs(x - X) == SSIZE and y == Y\n ver = fabs(y - Y) == SSIZE and x == X\n return (hor and not ver) or (ver and not hor)", "def iscircle(a):\n if isarc(a):\n start=a[1][1] \n end=a[1][2]\n ## these are special, integer values that flag a true full\n ## circle.\n if start==0 and end==360:\n return True\n else:\n return False", "def _is_circle_contractive(self,r,tol):\n B=np.diag(self.b)\n M=np.dot(B,self.A)+np.dot(self.A.T,B)-np.outer(self.b,self.b)\n X=M+B/r\n v,d=np.linalg.eig(X)\n if v.min()>-tol:\n return 1\n else:\n return 0", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from 
pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def colision(self, X, Y):\n #ESTE IF COMPROBARA MEDIANTE LAS POSICIONES EN EL EJE SI HAN GOLPEADO AL SUBDITO\n if X <= self.x + self.width and X >= self.x:\n if Y <= self.y + self.height and Y >=self.y:\n return True\n return False", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def isinsidearcXY(c,p):\n\n x = c[0]\n r = c[1][0]\n if dist(x,p) > r:\n return False\n if iscircle(c):\n return True\n start = c[1][1]%360.0\n end = c[1][2]%360.0\n if end < start:\n end+= 360.0\n p2 = sub(p,x)\n ang = (atan2(p2[1],p2[0]) % pi2)*360/pi2\n\n if end <= 360.0:\n return (ang >= start and ang <= end)\n else:\n return ang >= start or ang <= (end-360.0)", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def onSegment(self, p, q, r):\n if ((q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and\n (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):\n return True\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r" ]
[ "0.79219043", "0.71947825", "0.7167775", "0.7003251", "0.68779093", "0.68285936", "0.68090075", "0.67871714", "0.6755175", "0.6642627", "0.66155875", "0.6614157", "0.6549788", "0.648813", "0.6464612", "0.64520663", "0.64299595", "0.6427061", "0.6418052", "0.6398764", "0.6390092", "0.63895875", "0.6373094", "0.6366721", "0.63467073", "0.6326051", "0.6319644", "0.6295654", "0.6279686", "0.6279552" ]
0.74996036
1
Helper to calculate the remaining API calls to GitHub.
async def remaining(github: GitHubAPI):
    try:
        result = await github.rate_limit()
    except GitHubAuthenticationException as exception:
        _LOGGER.error(f"GitHub authentication failed - {exception}")
        return None
    except BaseException as exception:  # pylint: disable=broad-except
        _LOGGER.error(exception)
        return 0
    return result.data.resources.core.remaining or 0
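A small test-style sketch for the helper above; the real client is presumably an aiogithubapi GitHubAPI instance, so the stand-in below only mimics the attribute chain the helper reads (result.data.resources.core.remaining), and the module's logger and exception imports are assumed.

import asyncio
from types import SimpleNamespace

class FakeGitHub:
    # Stand-in for the GitHubAPI client; only rate_limit() is exercised.
    async def rate_limit(self):
        core = SimpleNamespace(remaining=4999)
        return SimpleNamespace(data=SimpleNamespace(resources=SimpleNamespace(core=core)))

print(asyncio.run(remaining(FakeGitHub())))  # 4999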
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_fetch_updates_for(github: GitHubAPI):\n if (limit := await remaining(github)) is None:\n return None\n\n if limit - RATE_LIMIT_THRESHOLD <= CALLS_PR_REPOSITORY:\n return 0\n return math.floor((limit - RATE_LIMIT_THRESHOLD) / CALLS_PR_REPOSITORY)", "def get_pullReq_commits(pullreq_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #fetch 250 max commits\n pullReq_commits = get_requests(pullreq_url, user, passwd)\n\n return pullReq_commits", "def get_pull_requests_count(self):\n repo_details = self.repo_url.strip().split('/')[-2:]\n pull_requests = 0\n i = 1\n while True:\n args = {'state': 'open', 'page': i, 'per_page': 100}\n api_url = \"https://api.github.com/repos/{}/{}/pulls?{}\".format(repo_details[0], repo_details[1],\n urllib.parse.urlencode(args))\n response = requests.request(\"GET\", api_url)\n response = json.loads(response.content)\n if not response:\n return pull_requests\n else:\n pull_requests += len(response)\n i += 1", "def check_previous_contributions(repo, author):\n cmds = [github_cli, 'search', 'prs', '--author', author, '--repo', repo, '--json', 'number,state']\n\n with subprocess.Popen(cmds, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n returncode = p.returncode\n print(err)\n print(returncode)\n ntries = 1\n if returncode:\n while returncode and ntries < 10:\n ntries += 1\n time.sleep(10)\n with subprocess.Popen(cmds, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n returncode = p.returncode\n print(\"New returncode : \", returncode)\n\n\n return json.loads(result)", "def get_pullReq(repo_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #repo url\n github_pullReq_url = \"https://api.github.com/repos/{}/pulls?state=all&per_page=100&page=\"\n url = github_pullReq_url.format(repo_url)\n\n #fetch all pages\n pullReq = []\n i=1\n eop = False\n while not eop:\n print(\"\\n\\nFECTHING PAGE {}\".format(i))\n data = get_requests(url+str(i), user, passwd)\n pullReq = pullReq + data\n i+=1\n if len(data) != 100:\n eop = True\n \n return pullReq", "def _rate_limit_reached(self, waiting=False):\n msg = [\"GitHub rate limit reached.\"]\n if waiting:\n msg.append(\"Waiting for limit reset...\")\n if \"Authorization\" not in self._api_headers():\n msg.append(\"Authenticate to GitHub to increase the limit.\")\n return \" \".join(msg)", "def _handle_rate_limit(self):\n if not self._wait_rate_limit:\n raise GithubRateLimitException(self._rate_limit_reached())\n\n url = GITHUB_API + \"/rate_limit\"\n headers = self._api_headers()\n remaining = 0\n while remaining == 0:\n if self._wait_warn and not Client._RATE_LIMIT_WARNED:\n from warnings import warn\n\n warn(self._rate_limit_reached(True), GithubRateLimitWarning)\n Client._RATE_LIMIT_WARNED |= True\n\n sleep(self._wait_retry_delay)\n resp = self._request(\"GET\", url, headers=headers)\n remaining = int((resp.json())[\"resources\"][\"core\"][\"remaining\"])", "def get_forks(repo_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #repo url\n github_fork_url = 
\"https://api.github.com/repos/{}/forks?sort=stargazers&per_page=100&page=\"\n url = github_fork_url.format(repo_url)\n\n #fetch all pages\n forks = []\n i=1\n eop = False\n while not eop:\n print(\"\\n\\nFECTHING PAGE {}\".format(i))\n data = get_requests(url+str(i), user, passwd)\n forks = forks + data\n i+=1\n if len(data) != 100:\n eop = True\n \n #reject private ones\n temp = forks\n for fork in temp:\n if fork['private'] == True:\n forks.remove(fork)\n print(\"{} private forks\".format(len(temp)-len(forks)))\n\n return forks", "def total_pulls(self) -> int:\n return self.__total_pulls", "async def _calculate_remaining_requests(self, request_id: int) -> int:\n raise NotImplementedError()", "def test_get_github_repos_count_negative(self):\n self.assertEqual(app.get_github_repos_count(\"undefined_user12345\")[\"status\"], 500)", "def get_github_token_info(g):\n rate_limit = g.get_rate_limit()\n near_expiry = rate_limit.core.remaining < 50\n wait_time = (rate_limit.core.reset - datetime.datetime.utcnow()).seconds\n return near_expiry, wait_time", "def get_commits(): # pragma: no cover\n global commit_data\n all_commits = 0\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n while all_commits == 0:\n url = 'https://api.github.com/repos/connormlewis/idb/stats/contributors'\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n total = entry['total']\n user_name = entry['author']['login']\n if user_name in team:\n team[user_name] = total\n all_commits += total\n return team, all_commits", "def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes &lt;to branch&gt; on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"&lt;%(branch)s&gt; %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = 
html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"", "def get_changes(access_token, organization_url, target_repo_name, source_branches, target_branch_name, pull_quantity, ignore_words=[]) -> dict:\n print('\\nConnecting to API\\n')\n try:\n # Create a connection to the org\n credentials = BasicAuthentication('', access_token)\n connection = Connection(base_url=organization_url, creds=credentials)\n\n # Get git Client\n # See azure.devops.v5_0.models for models\n # azure.devops.git.git_client_base for git_client methods\n git_client = connection.clients.get_git_client()\n\n # Get the repo\n repositories = git_client.get_repositories()\n\n except MSExceptions.ClientRequestError as err:\n print('Client Request Error:', str(err))\n return None\n except MSExceptions.AuthenticationError as err:\n print('Authentication Error: ', str(err))\n\n target_repo = None\n for repo in repositories:\n if repo.name == target_repo_name:\n target_repo = repo\n\n if not target_repo:\n print(f'Repository {target_repo_name} not found.')\n return None\n\n all_changes = {}\n\n ignored_commits = []\n processed_commits = []\n\n for branch in source_branches:\n\n # Find commits for the specific branch combination\n search_criteria = GitPullRequestSearchCriteria (\n source_ref_name = f'refs/heads/{branch}',\n target_ref_name = f'refs/heads/{target_branch_name}',\n status = 'Completed'\n )\n\n pull_requests = git_client.get_pull_requests(target_repo.id, search_criteria, top=9999)\n\n\n print(f\"Proccesing PR commits for {branch}...\")\n with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:\n future_prs = { executor.submit(process_pull_requests, git_client, target_repo.id, pull, ignore_words): pull for pull in pull_requests}\n for future in tqdm(futures.as_completed(future_prs), unit=' PRs'):\n data, processed, ignored = future.result()\n for change in data.keys():\n if all_changes.get(change):\n all_changes[change] = all_changes[change] + data[change]\n else:\n all_changes[change] = data[change]\n for commit in processed:\n processed_commits.append(commit)\n for commit in ignored:\n ignored_commits.append(commit)\n print()\n\n return all_changes, processed_commits, ignored_commits", "def test_repositories(self):\n\t\ttot_repos = total_repos(self)\n\t\t#self.assertEqual(tot_repos, \"6052353)", "def github_issue(issue_url: str, check_rate_limit: bool = True) -> Dict[str, Any]:\n user_agent = os.environ.get('THUMBSUP_USER_AGENT', 'thumbsup')\n headers = {\n 'Accept': GITHUB_ACCEPT_HEADER,\n 'User-Agent': user_agent,\n }\n github_token_file = os.environ.get('THUMBSUP_GITHUB_TOKEN_FILE')\n if github_token_file is not None:\n with open(github_token_file, 'r') as ifp:\n github_token = ifp.read().strip()\n if github_token != UNAUTHENTICATED and github_token != '':\n headers['Authorization'] = f'token {github_token}'\n\n if check_rate_limit:\n r = requests.get(GITHUB_RATE_LIMIT_URL, headers=headers)\n rate_limit_response = r.json()\n remaining_rate_limit = (\n rate_limit_response.get('resources', {}).get('core', {}).get('remaining', 0)\n )\n rate_limit_bound = os.environ.get('THUMBSUP_RATE_LIMIT_BOUND', '10')\n rate_limit_bound = int(rate_limit_bound)\n if remaining_rate_limit < rate_limit_bound:\n raise RateLimitError(f'Remaining GitHub rate limit too low: {remaining_rate_limit}')\n\n match = 
RE_GITHUB_ISSUE_EXTRACTOR.search(issue_url)\n if not match:\n raise SummaryError(f'URL does not match form of link to GitHub Issue: {issue_url}')\n\n owner, repo, issue_number = match.groups()\n\n issue_url = f'https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}'\n r = requests.get(issue_url, headers=headers)\n issue_response = r.json()\n\n comments_url = f'{issue_url}/comments'\n r = requests.get(comments_url, headers=headers)\n comment_response = r.json()\n comment_response.sort(\n key=lambda c: github_num_reactions(c) + github_num_positive_reactions(c),\n reverse=True\n )\n return {\n 'summarizer': 'github_issue',\n 'issue': issue_response,\n 'comments': comment_response,\n 'emojis': GITHUB_EMOJIS\n }", "def github(code, input):\n syntax = 'Syntax: \\'.github <user|user/repo>\\''\n failed = 'Failed to get data from Githubs API :('\n if len(input.group(2).strip().split()) != 1:\n return code.say(syntax)\n\n spacer = ' {blue}|{c} '\n\n if '/' not in input.group(2):\n # Assume a single username\n try:\n tmp = web.json(user_api % input.group(2).strip())\n response = {}\n # Remove dem ugly nulled values. It's a dictionary so we have to\n # loop differently.\n for key, value in tmp.iteritems():\n if value != '' or len(value) != 0 or value != 'null':\n response[key] = value\n print response\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n\n # Here is where we build the response\n output = []\n if 'name' in response:\n output.append('%s (%s)' % (response['name'], response['login']))\n else:\n output.append(response['login'])\n if 'location' in response:\n output.append(response['location'])\n if 'email' in response:\n output.append(response['email'])\n if 'public_repos' in response:\n output.append('%s Repos' % response['public_repos'])\n if 'followers' in response:\n output.append('%s Followers' % response['followers'])\n if 'following' in response:\n output.append('Following %s' % response['following'])\n if 'public_gists' in response:\n output.append('%s Gists' % response['public_gists'])\n if 'html_url' in response:\n output.append(response['html_url'])\n\n return code.say(spacer.join(output))\n\n else:\n # Assume Username/Repo\n try:\n response = jweb.json(repo_api % input.group(2).strip())\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n # Here is where we build the response\n output = []\n output.append('%s (%s)' %\n (response['name'], response['owner']['login']))\n output.append(response['description'])\n output.append('%s %s' % (response['stargazers_count'], u'\\u2605'))\n output.append('%s %s' % (response['watchers_count'], u'\\u231A'))\n output.append('%s %s' % (response['forks_count'], u'\\u2442'))\n output.append('%s %s' % (response['open_issues_count'], u'\\u2602'))\n output.append('%s %s' % (response['network_count'], u'\\U0001F46C'))\n output.append('%s %s' % (response['subscribers_count'], u'\\u2764'))\n output.append(response['html_url'])\n return code.say(spacer.join(output))", "def get_commits(github_id, repo):\r\n\r\n url = 'https://api.github.com/repos/{}/{}/commits'.format(github_id, repo)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n commit_count = 0\r\n\r\n for data in todos:\r\n commit_count += 1\r\n\r\n return commit_count", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error 
collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def update_streak(self, test_for_success=False):\n\n pb = PushBullet(PUSHBULLET_KEY)\n\n def github():\n \"\"\"Checks if a commit has been made in the last 24 hours.\"\"\"\n try:\n GITHUB_API = \"https://api.github.com/users/{0}/events\".format(self.user.username)\n\n accepted_events = AcceptedEvent.objects.values_list(\"name\", flat=True)\n events = json.loads(requests.get(GITHUB_API).text)\n for event in events:\n # it needs to be either a commit or a pull request\n # it must also be after the last update.\n if event[\"type\"] in accepted_events \\\n and self.date < dateutil.parser.parse(event[\"created_at\"]):\n return True\n else:\n return False\n except:\n return False\n\n def freecodecamp():\n \"\"\"Checks your freecodecamp profile for progress matching today's date.\"\"\"\n try:\n CODECAMP_URL = \"https://www.freecodecamp.com/{0}\".format(self.user.username)\n document = html5lib.parse(requests.get(CODECAMP_URL).text)\n if document.findtext((datetime.now()-timedelta(days=1)).strftime(\"%b %d, %Y\"), default=None) is None:\n return False\n return True\n except:\n return False\n\n def gitlab():\n try:\n repos_endpoint = \"/api/v3/projects\"\n repos = json.loads(requests.get(\n \"{0}{1}?order_by=last_activity_at&private_token={2}\".format(GITLAB_URL, repos_endpoint, GITLAB_KEY)).text)\n\n commits_endpoint = \"/api/v3/projects/{0}/repository/commits\"\n for repo in repos:\n commits = json.loads(requests.get(\n \"{0}{1}?order_by=last_activity_at&private_token={2}\".format(GITLAB_URL,\n commits_endpoint.format(repo[\"id\"]),\n GITLAB_KEY)).text)\n\n # if we get to a repo hasn't been updated in the last 24 hours, return false\n # (they are ordered by latest activity)\n if self.date > dateutil.parser.parse(repo[\"last_activity_at\"]):\n return False\n\n for commit in commits: # if the date is not in the last day, break\n if self.date < dateutil.parser.parse(commit[\"created_at\"]):\n # if we have the right guy, return true\n if commit[\"author_name\"] == self.user.username:\n return True\n else:\n break\n except:\n return False\n\n def session():\n date_from = datetime.now() - timedelta(days=1)\n\n if self.sessions.objects.filter(start__gte=date_from):\n return True\n\n return False\n\n successful = gitlab() or github() or freecodecamp() or session()\n\n if test_for_success is False:\n self.streak += (1*int(successful)*int(self.lost)) # stops you getting more points after 
losing.\n self.lost = not successful or self.lost # if you lost, it will stay until you open the app.\n self.date = datetime.now()\n if self.lost:\n push = pb.push_link(urlresolvers.resolve(\"codestreak:root\"), \"Your streak is over! Visit the app to reset.\")\n self.save()\n else:\n if successful:\n push = pb.push_note(\"Well done. You made a commit today.\", \":)\")\n else:\n push = pb.push_note(\"You're risking your streak!\", \"It's quite late and you still haven't made a commit. Hurry!\")\n\n return True if successful else False", "async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)", "def github_num_reactions(comment_object: Dict[str, Any]) -> int:\n return comment_object.get('reactions', {}).get('total_count', 0)", "def test_get_github_repos_count_positive(self):\n self.assertIsNotNone(app.get_github_repos_count(\"dhh\")[\"count\"])", "def process_pull_requests(git_client, repo_id, pull, ignore_words, ignore_extensionless_files=True) -> [dict, list, list]:\n processed_changes = {}\n commits = git_client.get_pull_request_commits(repo_id, pull.pull_request_id)\n\n ignored_commits = []\n processed_commits = []\n\n for commit in commits:\n commit_text = f'{commit.comment}, author: {color.Fore.BLUE}{color.Style.BRIGHT}{commit.author.name}{color.Style.RESET_ALL}'\n ignore_commit = False\n for word in ignore_words:\n if commit.comment.lower().find(word) != -1:\n ignore_commit = True\n break\n\n if not ignore_commit:\n changes = git_client.get_changes(commit.commit_id, repo_id).changes\n for change in changes:\n file_name = change['item']['path']\n\n if not '.' in file_name and ignore_extensionless_files:\n continue\n\n counter = processed_changes.get(file_name)\n if not counter:\n counter = 0\n counter +=1\n processed_changes[file_name] = counter\n processed_commits.append(commit_text)\n else:\n ignored_commits.append(commit_text)\n\n return processed_changes, processed_commits, ignored_commits", "def get_pull_requests_stat(self) -> Tuple[int, int, int]:\n return self._summarize(\n self._repo.get_pull_requests(), self._STALE_PULL_REQUESTS_DAYS\n )", "def review(args):\n try:\n pr = gh.get_pr(owner, repo, args.pull_request)\n except requests.exceptions.HTTPError:\n print('Couldn\\'t find pull request #%s in %s/%s.' 
%\n (args.pull_request, owner, repo))\n print('Make sure the number is correct and that you have read '\n 'permissions for this GitHub repository.')\n sys.exit(1)\n\n clone_url = pr['head']['repo']['clone_url']\n fork_branch = pr['head']['ref']\n fork_owner = pr['head']['repo']['owner']['login']\n\n repo_lib.fetch_fork(clone_url, fork_branch, fork_owner)\n repo_lib.checkout(fork_branch, fork_owner)\n sys.exit(0)", "def do_the_pulls(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp_dir:\n path = os.path.join(tmp_dir, \"{}_{}_pulls.txt\".format(repo_id, user_id)\n )\n\n # the first request for pull\n the_url = get_initial_url_pulls(user_id, repo_id)\n resp_obj = requests.get(the_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n\n # prase the initial request.\n rsp_json = json.loads(resp_obj.text)\n print(\"the len of resp is {}\".format(len(rsp_json)))\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n\n # subsequent requests for pull\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))", "def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))", "async def github_repo_info(self, ctx: commands.Context, *repo: str) -> None:\n repo = \"/\".join(repo)\n if repo.count(\"/\") != 1:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The repository should look like `user/reponame` or `user reponame`.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n async with ctx.typing():\n repo_data = await self.fetch_data(f\"{GITHUB_API_URL}/repos/{quote(repo)}\")\n\n # There won't be a message key if this repo exists\n if \"message\" in repo_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The requested repository was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n embed = discord.Embed(\n title=repo_data[\"name\"],\n description=repo_data[\"description\"],\n colour=discord.Colour.blurple(),\n url=repo_data[\"html_url\"]\n )\n\n # If it's a fork, then it will have a parent key\n try:\n parent = repo_data[\"parent\"]\n embed.description += f\"\\n\\nForked from [{parent['full_name']}]({parent['html_url']})\"\n except KeyError:\n log.debug(\"Repository is not a fork.\")\n\n repo_owner = repo_data[\"owner\"]\n\n embed.set_author(\n name=repo_owner[\"login\"],\n url=repo_owner[\"html_url\"],\n icon_url=repo_owner[\"avatar_url\"]\n )\n\n repo_created_at = datetime.strptime(repo_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y\")\n last_pushed = datetime.strptime(repo_data[\"pushed_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y at %H:%M\")\n\n embed.set_footer(\n text=(\n f\"{repo_data['forks_count']} ⑂ \"\n f\"• {repo_data['stargazers_count']} ⭐ \"\n f\"• Created At {repo_created_at} \"\n f\"• 
Last Commit {last_pushed}\"\n )\n )\n\n await ctx.send(embed=embed)" ]
[ "0.7030906", "0.63627785", "0.62276036", "0.5946589", "0.59272087", "0.5900084", "0.5822916", "0.5744561", "0.5682124", "0.5652667", "0.5551989", "0.5540379", "0.55062664", "0.54878956", "0.54800576", "0.5454324", "0.53284425", "0.5261303", "0.5250635", "0.5246006", "0.5245562", "0.52298826", "0.52258444", "0.5220632", "0.516864", "0.51574594", "0.5114569", "0.5114216", "0.50998425", "0.5091084" ]
0.6762042
1
Display a coloured subtitle.
def subtitle(string): print("{}\n{}\n".format(bold(string), underline(string, "-")))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)", "def create_subtitle(self):\n label_subtitle = Label(self.frame, text=\"Projet Python 2020\", font=(\"Arial\", 25), bg='light blue',\n fg='white')\n label_subtitle.pack()", "def subtitle(\n pdf, text, indent=10, border=BORDER, font_size=12, font_style=\"B\"\n): # pylint: disable = too-many-arguments\n pdf.cell(indent, border=border)\n pdf.set_font(\"arial\", font_style, font_size)\n pdf.cell(75, 10, text, border, 1)", "def show_text(text, colour):\n message = font_style.render(text, True, colour)\n dis.blit(message, [game_size_x/2, game_size_y/2])", "def showText(self, context, text, size=1, color=colors.WHITE, conversion=True):\n context.print(text, self.components, size, color=color, conversion=conversion)", "def text(self, str: str, x: int, y: int, colour: int, /) -> None:", "def get_subtitle(annotation, sub_duration, video_clip, seen_annotations):\n if len(annotation[\"text\"]) == 0:\n return None\n\n annotation_txt = calculate_needed_subtitle_height(annotation, seen_annotations, video_clip)\n\n txt_clip = TextClip(annotation_txt, color=\"white\", fontsize=70, font='Sans Serif')\n txt_clip = txt_clip.set_position((\"center\", get_subtitle_offset(annotation, seen_annotations, video_clip)))\n txt_clip = txt_clip.set_start(float(annotation[\"time\"]) / 1000.0)\n txt_clip = txt_clip.set_duration(sub_duration)\n\n return txt_clip", "def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), color=color, size=size)", "def ShittyCaption(sc, event):\n request = urllib2.Request(\n 'http://shittynewyorkercartooncaptions.tumblr.com/random')\n result = urllib2.urlopen(request)\n sc.api_call('chat.postMessage', as_user='true',\n channel=event['channel'], text=result.geturl())", "def enable_subtitle(self, subtitle):\n self._change_track(subtitle)", "def render_text(self, title, pos, color):\n if title not in self.text_elements:\n self.text_elements[title] = UIText(\n title, pos, font_size=20, font_color=color)", "def print_text(TINY_FONT, x, y, text, color = white):\n text_image = TINY_FONT.render(text, True, color)\n gameDisplay.blit(text_image, (x,y))", "def set_intro_text_2(self, text, color):\n text_y_position = (self.objects['titleimage'].b() + self.border.bottom()) / 2\n\ttext_y_position = text_y_position - 30\n self.objects['introtext2'] = LCARSText((self.width/2, text_y_position),\n text,\n 36,\n TextAlign.XALIGN_CENTRE, color, Colours.BG, True)", "def show_text(text, args):\n return expyriment.stimuli.TextLine(text,\n text_font=args[\"--text-font\"],\n text_size=args[\"--text-size\"],\n text_colour=args[\"stimuli_color\"],\n background_colour=args[\"bg_color\"])", "def write_subtitle(self, subtitle: str, break_page: bool, class_txt: str) -> str:\n if break_page:\n str_title = \"\"\"<h2 class=\"break-before\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n else:\n str_title = \"\"\"<h2 class=\\\"\"\"\" + class_txt + \"\"\"\\\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n self.html_doc = self.html_doc + str_title\n return self.html_doc", "def put_text(self, text, color, point):\n x1, y1 = self.pos_shift\n x2, y2 = point\n if not self.in_display((x2 - x1, y2 - y1)):\n return\n font = pygame.font.SysFont(\"monospace\", 18, bold=True)\n label = font.render(text, 1, color)\n self.screen.blit(label, (\n x2 - x1,\n y2 - y1\n ))", "def Print(self, s, color=(229, 153, 153, 255)):\r\n 
self.screen.blit(self.font.render(s, True, color), (5, self.textLine))\r\n self.textLine += 15", "def title(self, text, x=0.01, y=0.99, size=15, color=None,\n bgcolor=None, alpha=1, **kwargs):\n if color is None:\n color = 'k' if self._black_bg else 'w'\n if bgcolor is None:\n bgcolor = 'w' if self._black_bg else 'k'\n self.frame_axes.text(x, y, text, \n transform=self.frame_axes.transAxes,\n horizontalalignment='left',\n verticalalignment='top',\n size=size, color=color,\n bbox=dict(boxstyle=\"square,pad=.3\", \n ec=bgcolor, fc=bgcolor, alpha=alpha),\n **kwargs)", "def subtitle_header(slide, title: str):\n top = Inches(0.61)\n left = Inches(0.42)\n height = Inches(0.5)\n width = Inches(0.5)\n txt_box = slide.shapes.add_textbox(left, top, width, height)\n text_frame = txt_box.text_frame\n\n paragraph = text_frame.paragraphs[0]\n paragraph.text = title\n paragraph.font.bold = False\n paragraph.font.size = Pt(22)\n paragraph.font.name = 'Times New Roman'\n\n return slide", "def h1(text):\n print(Fore.GREEN + '\\n%s'%text)\n line = \"\"\n for ii in range(len(text)):\n line += \"-\"\n print(Fore.GREEN + line)\n print(Fore.WHITE)", "def text(self):\n surface_score = pygame.font.SysFont('Helvetic', 100).render(str(self.score), False, BLACK)\n screen.blit(surface_score, (50, 50))", "def flash_display_text(self):\r\n if self.flashMillisecs in self.flashTimes:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")\r\n self.talkInfoString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")\r\n else:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : black; color : white; }\")\r\n self.talkInfoString.setStyleSheet(\"QLabel { background-color : black; color : white; }\")\r\n self.flashMillisecs -= 50\r\n if self.flashMillisecs < 0:\r\n self.flashTimer.stop()", "def print_banner(text):\n print(Figlet(font='smslant').renderText(text))", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def showText(pos):\n\treturn OnscreenText( \\\n\t\ttext=\" \", \\\n\t\tstyle=1, fg=(0,0,0,1), pos=(-1.3, pos), \\\n\t\talign=TextNode.ALeft, scale = .06, mayChange = True)", "def display_toxics(span, text):\n html = \"<p class='spans'>\"\n for ind, char in enumerate(text):\n if ind in span:\n html += \"<b style='color:red'>\" + char + '</b>'\n else:\n html += char\n html += '</p>'\n return html", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def prBlueBG(text):\n print(\"\\033[44m{}\\033[0m\".format(text), sep=\"\")", "def set_intro_text(self, text, color):\n text_y_position = (self.border.inner_y() + self.objects['titleimage'].t()) / 2\n self.objects['introtext'] = LCARSText((self.width/2, text_y_position),\n text,\n 36,\n TextAlign.XALIGN_CENTRE, color, Colours.BG, True)", "def display_tournament(self, title: str, subtitle: str = \"\\n\", datas: list = None):\n self.clean()\n print(f\"{title}\")\n 
print(f\"{subtitle}\\n\")\n for data in datas:\n print(f\"\\t{data}\")\n print(\"\\n\" * 2)\n self.stand_by_msg(\"\")" ]
[ "0.70619345", "0.6629809", "0.61023265", "0.60484195", "0.6003638", "0.59983635", "0.5956739", "0.59418565", "0.58387935", "0.58370787", "0.5800514", "0.5796084", "0.5772227", "0.5701921", "0.56968796", "0.5666018", "0.5659659", "0.565431", "0.56356025", "0.56096834", "0.5607825", "0.5569401", "0.55689144", "0.55333257", "0.5514296", "0.5513843", "0.5493466", "0.54753673", "0.54742074", "0.5452914" ]
0.7079763
0
timestamp of last message
def last_timestamp(self): return self._last_timestamp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lastMessageReceived():", "def get_last_timestamp(self):\n return self._frame_timestamp", "def get_last_time(self):\n \n return self._last", "def lasttime(self):\n if hasattr(self, \"_lasttime\"):\n return self._lasttime\n else:\n return None", "def last_timestamp(self):\n LOGGER.debug('Getting last_timestamp as: %s', self._last_timestamp)\n return self._last_timestamp", "def last_time(self) -> datetime:\n return self.activities[-1].timestamp", "def last(self):\n if len(self._messages) == 0:\n return ''\n else:\n return self.format_message(self._messages[-1])", "def last_event_time(self) -> str:\n return pulumi.get(self, \"last_event_time\")", "def get_last_update_time(self):\n return self.last_update_time", "def last_update_time(self):\n return self._last_update_time", "def last_count_update_time(self):\n return self.__last_count_update_time", "def when_last_chat_with(self, actor_label):\n query = read_query('trust/when_last_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['time']['value'].split('/')[-1] if response != [] else ''", "def last_updated_time(self) -> str:\n return pulumi.get(self, \"last_updated_time\")", "def last_activity(self):\n\t\tif not self.lastseen:\n\t\t\treturn 'nooit'\n\n\t\t# this was once made by myself (Erick)\n\t\t# return self.lastseen.strftime(\"%d %B %Y, %H:%M:%S\")\n\n\t\t# this is later added by the author of the Flask Tutorial (Michael)\n\t\treturn moment(self.lastseen).format('LLL')", "def last_post_date(self):\n last_reply = Reply.query.filter_by(\n thread_id=self.id).order_by(Reply.id.desc()).first()\n\n if last_reply:\n return last_reply.date_created\n\n return self.date_created", "def last_heartbeat_time(self) -> str:\n return pulumi.get(self, \"last_heartbeat_time\")", "def lastEventTime(self):\n return self._lastEventTime", "def last_updated_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_updated_time\")", "def last_update(self):\n return self._last_update", "def last_update(self):\n return self._last_update", "def get_time_since_last_seen(self):\n return time.time() - self.time_last_seen", "def calcLastMessageId(self): \n lastReadMessageId = self.lastReadMessageId # if stilll None read all from that global chat ( add field and add to this field )\n try:\n if(len(self.myMsgs()) > 0):\n last = self.myMsgs().last() \n if(last.globalMsg.id > self.lastReadMessageId): # in locals last id = 0 // \n lastReadMessageId = last.globalMsg.id\n except AttributeError as er:\n print('\\n')\n print('IntermediateLayerForMessaging . 
lastMessageId error in class ' + self.__class__+ ' and instance '+ self)\n print('\\n')\n print(er)\n print('\\n')\n except Exception as er:\n print('Wierd Unexpected Error')\n print(er)\n self.lastReadMessageId = lastReadMessageId\n self.save()", "def _get_inner_time(self):\n timestamp = self.last_two_timestamps[0]\n if not timestamp:\n return '0000-00-00 00:00:00'\n return str(timestamp)", "def last_config_send_time(self) -> str:\n return pulumi.get(self, \"last_config_send_time\")", "def peek_last_datetime(self, topic):\n queue = self._get_queue(topic)\n self.logger.debug(\"TopicManager queue size is: %i\" % len(queue))\n datetime_value = 0\n if queue:\n datetime_value = queue[-1]['data']['dateTime']\n\n return datetime_value", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def last_error_time(self) -> str:\n return pulumi.get(self, \"last_error_time\")", "def dt_last_update(self):\n return self.last_update", "def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time" ]
[ "0.7769647", "0.72082126", "0.71306455", "0.70400333", "0.700233", "0.68460685", "0.6843184", "0.6805808", "0.67488164", "0.6714291", "0.6710211", "0.66942847", "0.6675987", "0.66672134", "0.661491", "0.6594136", "0.6582506", "0.65269333", "0.6519992", "0.6519992", "0.65007913", "0.6497863", "0.6453791", "0.6434583", "0.64192677", "0.64174753", "0.64174753", "0.64000964", "0.6392447", "0.63904905" ]
0.7429002
1
dictionary of all information multiple messages (key is a string, value is a list of lists that contains the messages)
def msg_info_multiple_dict(self): return self._msg_info_multiple_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def messages(self):\n return {}", "def _add_message_info_multiple(self, msg_info):\n if msg_info.key in self._msg_info_multiple_dict:\n if msg_info.is_continued:\n self._msg_info_multiple_dict[msg_info.key][-1].append(msg_info.value)\n else:\n self._msg_info_multiple_dict[msg_info.key].append([msg_info.value])\n else:\n self._msg_info_multiple_dict[msg_info.key] = [[msg_info.value]]\n self._msg_info_multiple_dict_types[msg_info.key] = msg_info.type", "def messages(self) -> dict:\n raise NotImplementedError", "def create_messages(data: list, month_name: str) -> dict:\n\n messages = {}\n for person in data:\n name, amount, phone = person[0], person[1], person[2]\n body = f\"Hey {name}, your amount owing for the month of {month_name} is {amount}.\"\n\n messages[phone] = body\n\n print(\"\\nHere are the messages to be sent:\\n\")\n for key in messages:\n print(messages[key])\n\n return messages", "def extract_messages(self,msg_list):\n msgs = []\n for m in msg_list:\n msgs.append(json.loads(str(m)))\n return msgs", "def create_threats_objects(messages: list) -> list:\n threats_info_map = []\n message_keys = ['sender', 'recipient', 'subject']\n for message in messages:\n for threat in message.get('threatsInfoMap'):\n threat_object = {key: value for key, value in message.items() if key in message_keys}\n threat_object.update(threat)\n threats_info_map.append(threat_object)\n\n return threats_info_map", "def get_messages():\n dynamodb = boto3.client('dynamodb')\n messages = []\n _messages = []\n paginator = dynamodb.get_paginator('scan')\n for page in paginator.paginate(TableName=os.environ.get('MESSAGE_TABLE_NAME')):\n _messages.extend(page['Items'])\n\n if not _messages:\n return _messages\n\n for message in _messages:\n m = {\n message['timestamp']['N']: message['data']['S']\n }\n messages.append(m)\n\n # sort list of dict by timestamp\n messages = list(map(dict, sorted(list(i.items()) for i in messages)))\n\n _messages = []\n for message in messages:\n _, v = list(message.items())[0]\n _messages.append(v)\n\n return _messages", "def list_messages(self):", "def ReceivedMessageCatalog():\n aio_updates = numpy.zeros((len(AIO_NODE_HELPER), len(MESSAGE_TYPE_HELPER)),\n dtype=numpy.ulonglong)\n # aio_updates[source][message_type] > 0 if any message is received.\n aio_util.GetAioUpdates(aio_updates)\n received_messages = {}\n for message_type in range(len(MESSAGE_TYPE_HELPER)):\n aio_nodes = numpy.nonzero(aio_updates[:, message_type])[0]\n if aio_nodes.size:\n received_messages[message_type] = aio_nodes\n return received_messages", "def make_dict_from_messages(messages, full_dict=None):\n\n\tout = defaultdict(dict)\n\tif full_dict==None:\n\t\tfull_dict = get_full_dict(frappe.local.lang)\n\n\tfor m in messages:\n\t\tif m[0] in full_dict:\n\t\t\tif m[1] in full_dict[m[0]]:\n\t\t\t\tout[m[1]] = full_dict[m[0]][m[1]]\n\n\treturn dict(out)", "def msg_info_dict(self):\n return self._msg_info_dict", "def select_all_messages(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM LED_MESSAGE\")\n\n rows = cur.fetchall()\n\n all_m = []\n for row in rows:\n l_m = { \n \"ts\": row[0],\n \"elapsed\": float(row[1]), \n \"font\": row[2], \n \"type\": row[3], \n \"body\": row[4], \n \"color\": row[5], \n \"behavior\": row[6]\n }\n all_m.append({ \"message\":{ \"payload\": l_m }})\n\n return all_m", "def get_messages_from_cursor(self):\n\n def get_msg(r):\n msg = dict(\n id=r[0],\n datetime=r[1],\n text=r[2],\n sender=r[3],\n media=r[4],\n **json.loads(r[5]),\n )\n if len(r) > 6:\n msg['dialog'] = r[6]\n for 
field in DATETIME_FIELDS:\n if field not in msg:\n continue\n tz_field = msg[field]\n if isinstance(tz_field, str):\n msg[field] = parse_time(tz_field)\n return {k: v for k, v in msg.items() if v} # get rid of Falsey\n\n return {\n r[0]: get_msg(r)\n for r in self.cur.fetchall()\n }", "def alert_messages(messages):\n return {\n 'messages': messages\n }", "def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])", "def __init__(self, users, messages):\n self.users = users\n self.messages = messages\n # addd dict with each user as key and there corresponidn gmessages", "def get_message_list(self):\n \n result = requests.get(\n url = root_url + '/{}'.format(\"message\"),\n headers = { 'Authorization': api_key },\n )\n\n message_list = result.json()\n\n self.message_list = message_list", "def make_dict_from_messages(messages, full_dict=None, load_user_translation=True):\n\tout = {}\n\tif full_dict is None:\n\t\tif load_user_translation:\n\t\t\tfull_dict = get_all_translations(frappe.local.lang)\n\t\telse:\n\t\t\tfull_dict = get_translations_from_apps(frappe.local.lang)\n\n\tfor m in messages:\n\t\tif m[1] in full_dict:\n\t\t\tout[m[1]] = full_dict[m[1]]\n\t\t# check if msg with context as key exist eg. 
msg:context\n\t\tif len(m) > 2 and m[2]:\n\t\t\tkey = m[1] + \":\" + m[2]\n\t\t\tif full_dict.get(key):\n\t\t\t\tout[key] = full_dict[key]\n\n\treturn out", "def create_messages_output(messages_list: list) -> list:\n outputs = []\n message_keys = ['spamScore', 'phishScore', 'threatsInfoMap', 'messageTime', 'impostorScore', 'malwareScore',\n 'cluster', 'subject', 'quarantineFolder', 'quarantineRule', 'policyRoutes', 'modulesRun',\n 'messageSize', 'messageParts', 'completelyRewritten', 'id', 'sender', 'recipient', 'senderIP',\n 'messageID', 'GUID']\n\n header_fields = ['headerFrom', 'headerReplyTo', 'fromAddress', 'fromAddress', 'ccAddresses',\n 'replyToAddress', 'toAddresses', 'xmailer']\n\n for message in messages_list:\n message_header = {}\n for field in header_fields:\n message_header[field] = message[field]\n\n message_output = {key: value for key, value in message.items() if key in message_keys}\n message_output['Header'] = message_header\n outputs.append(message_output)\n\n return outputs", "def long_format (self):\n\n long = {}\n if len(self.list) > 0:\n for key in self.list[0].keys():\n long[key] = [actor[key] for actor in self.list]\n return long", "def group_alerts(messages: list[dict]) -> dict[str, list[Alert]]:\n alerts = defaultdict(list)\n for m in messages:\n if \"subtype\" not in m or m[\"subtype\"] != \"bot_message\":\n logging.debug(\n f'Skipping message \\'{m[\"text\"]}\\' as it does not come from a bot'\n )\n continue\n\n timestamp = float(m[\"ts\"])\n for at in m.get(\"attachments\", []):\n if \"title\" not in at:\n continue\n\n mg = re.match(r\"Alert: (.*) \\[(FIRING:\\d+|RESOLVED)\\] *(.*)$\", at[\"title\"])\n if not mg:\n continue\n\n alert_name = mg.group(1)\n alert_message = mg.group(3)\n\n if not alert_name:\n logging.debug(f'no alert name in title {at[\"title\"]}. Skipping')\n continue\n\n # If there's only one alert related to the alert_name, message will be part\n # of the title. 
If not, alert messages will be part of the text under\n # \"Alerts Firing\" / \"Alerts Resolved\"\n if alert_message:\n alert_state = \"FIRING\" if \"FIRING\" in mg.group(2) else mg.group(2)\n alerts[alert_name].append(\n Alert(\n state=alert_state,\n message=alert_message,\n timestamp=timestamp,\n username=m[\"username\"],\n )\n )\n else:\n alert_state = \"\"\n for line in at[\"text\"].split(\"\\n\"):\n if \"Alerts Firing\" in line:\n alert_state = \"FIRING\"\n elif \"Alerts Resolved\" in line:\n alert_state = \"RESOLVED\"\n elif line.startswith(\"-\"):\n mg = re.match(\"^- (.+)$\", line)\n if not mg:\n continue\n alert_message = mg.group(1)\n alerts[alert_name].append(\n Alert(\n state=alert_state,\n message=alert_message,\n timestamp=timestamp,\n username=m[\"username\"],\n )\n )\n\n return dict(alerts)", "def stat_message(message):\n return {\n 'id': message['id'],\n 'age': message['age'],\n 'created': message['created'],\n }", "def messages(self) -> Mapping[str, wrappers.MessageType]:\n return collections.OrderedDict(\n (k, v) for k, v in self.all_messages.items()\n if not v.meta.address.parent\n )", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def get_all_msgs(self):\n data = self.database.select(self.tname)\n msgs = []\n for item in data:\n msgs.append((item[0], self.data_to_msg(item)))\n return msgs", "def parse_messages(self, orig):\n data=orig[1:len(orig)-1]\n output=[]\n for i in range(0, len(data), 3):\n message_data=data[i].split(',')\n message_text=data[i+1]\n output.append({'status':message_data[1], 'number':message_data[2],'date':message_data[4],'time':message_data[5],'text':message_text})\n return 
output", "def top_level_messages(self) -> Mapping[str, wrappers.MessageType]:\n return {\n k: v\n for p in self.protos.values()\n for k, v in p.messages.items()\n }", "def sub_jsons(self, msg):\n i = 0\n result = []\n split_msg = msg.split('}{')\n for s in range(len(split_msg)):\n if i==0 and len(split_msg)==1:\n result.append(split_msg[s])\n elif i==0 and len(split_msg)>1:\n result.append(split_msg[s]+\"}\")\n elif i==len(split_msg)-1 and len(split_msg)>1:\n result.append(\"{\"+split_msg[s])\n else:\n result.append(\"{\"+split_msg[s]+\"}\")\n i+=1\n return result", "def get_info(self,honeypotids):\n req = {\"type\":\"get_info\",\n \"from\":self.network.mc_id,\n \"to\": honeypotids}\n expect_dict = {\"type\":\"send_info\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"info\"]\n return answer", "def read_messages(self):\n \n messages = copy.deepcopy(dict(self.unique_messages))\n self.unique_messages.clear()\n return messages" ]
[ "0.6741191", "0.6618709", "0.65769136", "0.65106976", "0.628509", "0.6223275", "0.6112383", "0.61069626", "0.60848707", "0.605104", "0.60022557", "0.5961217", "0.59569836", "0.59249747", "0.5894613", "0.5869188", "0.584395", "0.5818667", "0.5804151", "0.5800034", "0.5740932", "0.5730172", "0.568255", "0.56691015", "0.5659133", "0.5654255", "0.5638725", "0.5637917", "0.56252706", "0.5615362" ]
0.74228317
0
list of all changed parameters (tuple of (timestamp, name, value))
def changed_parameters(self): return self._changed_parameters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters_changed(self):\n pass", "def getChanges():", "def parameters(self):\n return []", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def parameter_names(self) -> List[str]:", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def sensor_parameters_list(self):\n return list(self.params_f.keys()) + list(self.params_i.keys())", "def param(self):\r\n\r\n return []", "def update_parameters(self, timestamp, inputs):\n pass", "def param(self):\n return []", "def param(self):\n return []", "def list_value_changes(self, field_name):\n\n t = self.data['timestamp']\n x = self.data[field_name]\n indices = t != 0 # filter out 0 values\n t = t[indices]\n x = x[indices]\n if len(t) == 0: return []\n ret = [(t[0], x[0])]\n indices = np.where(x[:-1] != x[1:])[0] + 1\n ret.extend(zip(t[indices], x[indices]))\n return ret", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def param(self):\r\n return []", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def updateParameters(self, parameters):", "def parameters(self):\n return self._params", "def parameters(self):", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def _get_parameters(self) -> list:\n return self.parameters", "def updateParameters(self):\n\n return", "def params(self) -> Tuple[Parameter, ...]:\n raise NotImplementedError()", "def get_params_snapshot(self):\n ...", "def get_params(self):\n return list(self.params.values())", "def get_parameters(self):\n return(_deepcopy(self.parameters))", "def getParameters(self): #$NON-NLS-1$\r", "def parameters(self):\n pass", "def get_params(self):\n return []", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return" ]
[ "0.68223023", "0.66279924", "0.64772093", "0.6446315", "0.641585", "0.6374758", "0.6344504", "0.63368213", "0.6317814", "0.63148576", "0.63148576", "0.6312907", "0.6299366", "0.6272338", "0.61801887", "0.61597204", "0.6142085", "0.6137029", "0.6132782", "0.6114759", "0.6079023", "0.6074004", "0.60709095", "0.6053446", "0.6036946", "0.60339105", "0.5992888", "0.5992076", "0.5984389", "0.5984389" ]
0.72855586
0
dict of MessageLoggingTagged objects
def logged_messages_tagged(self): return self._logged_messages_tagged
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())", "def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict", "def get_rawlogs_by_tag(tags):\n return dict(get_rawlogs_by_tag_it(tags))", "def get_tag_dict(self):\n return self.tag_dict", "def msg_info_multiple_dict(self):\n return self._msg_info_multiple_dict", "def messages(self):\n return {}", "def messages(self) -> dict:\n raise NotImplementedError", "def _genLookupDict(self, tagged):\n lookup_dict = {}\n for token,pos in tagged:\n if pos not in lookup_dict.keys():\n lookup_dict[pos] = []\n lookup_dict[pos].append(token)\n return lookup_dict", "def _metric_tags(self):\r\n tags = [\r\n u'{}.{}:{}'.format(self.__class__.__name__, attr, self[attr])\r\n for attr in self.metric_tag_fields\r\n if attr in self.attributes\r\n ]\r\n tags.append(u'model_class:{}'.format(self.__class__.__name__))\r\n return tags", "def get_tags(self):\n tags = self.AWS_TAGS\n\n label_selector = self.label_selector.split('=')\n label_tag = {'Key': label_selector[0], 'Value': label_selector[1]}\n tags.append(label_tag)\n\n annotation_tag = {'Key': self.expire_annotation, 'Value': str(int(self.now + self.DAY_AND_NIGHT))}\n tags.append(annotation_tag)\n\n return tags", "def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))", "def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)", "def tags(self) -> dict:\n return self._tags", "def get_word_tag_dict(self):\n return self.word_tag_dict", "def stat_message(message):\n return {\n 'id': message['id'],\n 'age': message['age'],\n 'created': message['created'],\n }", "def registered_tags(self):\r\n return self._mapping.keys()", "def items(self):\n return self._tagged.items()", "def msg_info_dict(self):\n return self._msg_info_dict", "def as_dict(self):\n return {'tag_type': self.tag_type, 'value': self.value}", "def tagger():", "def tags(self) -> dict:\n\n return self._tags or None # store trivial tags as empty (for iteration), return as None", "def get_tag_and_body(tagged_events):\n tag_events = []\n for tag_event in tagged_events:\n body = tag_event[1].get(\"body\")\n tb = {\"body\": body}\n tb[\"tags\"] = []\n tb[\"display_name\"] = tag_event[1].get(\"display_name\")\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tb[\"tags\"].append(tag)\n tag_events.append(tb)\n return tag_events", "def as_dict(self):\n return {\n 'interface_id': self.interface.id,\n 'tag': self.user_tag.as_dict() if self.user_tag else None\n }", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def values(self):\n return self._tagged.values()", "def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))", "def messages(self) -> Mapping[str, wrappers.MessageType]:\n return collections.OrderedDict(\n (k, v) for k, v in self.all_messages.items()\n if not v.meta.address.parent\n )", "def get_dagmc_tags(my_core):\n\n dagmc_tags = {}\n\n dagmc_tags['geom_dim'] = my_core.tag_get_handle('GEOM_DIMENSION', size=1, tag_type=types.MB_TYPE_INTEGER,\n storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # geometric dimension\n\n dagmc_tags['category'] = my_core.tag_get_handle('CATEGORY', size=32, tag_type=types.MB_TYPE_OPAQUE,\n 
storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # the category\n\n dagmc_tags['global_id'] = my_core.tag_get_handle('GLOBAL_ID', size=1, tag_type=types.MB_TYPE_INTEGER,\n\n storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # id\n\n return dagmc_tags", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")" ]
[ "0.6306031", "0.62712485", "0.6040021", "0.60088646", "0.59619224", "0.5934395", "0.58777297", "0.5834602", "0.57534343", "0.5654458", "0.5623019", "0.5615123", "0.55820113", "0.5542628", "0.5486026", "0.5482367", "0.54719156", "0.53995234", "0.53904647", "0.5385974", "0.53716207", "0.53568834", "0.5350517", "0.5298251", "0.5298251", "0.5294525", "0.52881545", "0.5237835", "0.5236383", "0.5197114" ]
0.69523114
0
True if a file corruption got detected
def file_corruption(self): return self._file_corrupt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_packet_corruption(self, header):\n data_corrupt = False\n if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:\n if not self._file_corrupt and self._debug:\n print('File corruption detected')\n data_corrupt = True\n self._file_corrupt = True\n\n return data_corrupt", "def check_corrupted_files(self):\r\n for store in STORES:\r\n path = f\"{self.system.config_path}/.storage/{STORES[store]}\"\r\n if os.path.exists(path):\r\n if os.stat(path).st_size == 0:\r\n # File is empty (corrupted)\r\n return True\r\n return False", "def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. File may be truncated')\n return True", "def is_file_size_error(self):\n return self._tag == 'file_size_error'", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))", "def validate_file_handler(self):\n if self.fh.closed:\n try:\n self.fh = open(self.path, \"r\")\n self.fh.seek(0, 2)\n except OSError as err:\n logging.error(\"Could not reopen file: {}\".format(err))\n return False\n\n open_stat = os.fstat(self.fh.fileno())\n try:\n file_stat = os.stat(self.path)\n except OSError as err:\n logging.error(\"Could not stat file: {}\".format(err))\n return False\n\n if open_stat != file_stat:\n self.log\n self.fh.close()\n return False\n\n return True", "def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False", "def test_is_not_delicious_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not DelImporter.can_handle(bad_file),\r\n \"DelImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def test_is_not_delicious_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not DelXMLImporter.can_handle(bad_file),\r\n \"DelXMLImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def reportFileCorruption(gpfn, sitemover):\n\n # except for lcgcp site 
mover (since it does not return a proper SURL, the consistency report is done in the site mover)\n _copytool, dummy = getCopytool(mode=\"get\")\n if _copytool != \"lcgcp\" and _copytool != \"lcg-cp\" and _copytool != \"storm\":\n if gpfn != \"\":\n try:\n sitemover.reportFileCorruption(gpfn)\n except Exception, e:\n tolog(\"!!WARNING!!1212!! Caught exception: %s\" % (e))\n else:\n tolog(\"Reported file corruption\")\n else:\n tolog(\"!!WARNING!!1990!! Can not report SURL for corrupt file to consistency server since SURL is an empty string\")\n else:\n tolog(\"(Already reported corrupted file)\")", "def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)", "def is_file_type_error(self):\n return self._tag == 'file_type_error'", "def check_file_validity(self):\n # Initialize key variables\n file_ = self.tailed_file\n\n # Check if exists\n if os.path.exists(file_) is False:\n log_message = 'File {} does not exist.'.format(file_)\n log.log2die(1018, log_message)\n\n # Check if file\n if os.path.isfile(file_) is False:\n log_message = '{} is not a file.'.format(file_)\n log.log2die(1035, log_message)\n\n # Check if readable\n if not os.access(file_, os.R_OK):\n log_message = 'File {} is not readable.'.format(file_)\n log.log2die(1036, log_message)", "def eof_check(self) -> bool:\n eof = False\n curr_pos = self.fileobject.tell()\n # print(curr_pos, self.st_size)\n chunk = self.fileobject.read(25)\n if chunk == '':\n # Is there something on the back burner??\n if len(self._backburner) > 0:\n self.fileobject = self._backburner.pop()\n # TODO: what if it is the end of the back burner file? 
Is that handled?\n else:\n eof = True\n else:\n self.fileobject.seek(curr_pos)\n return eof", "def preliminary_file_check(self):\n\n if self.has_error():\n return False\n\n if not self.filepath:\n self.add_error(\"A file was specified!\")\n return False\n\n if not isfile(self.filepath):\n self.add_error(\"The file was not found: %s\" % basename(self.filepath))\n return False\n\n if getsize(self.filepath) < 1:\n self.add_error(\"The file is empty (no bytes): %s\" % basename(self.filepath))\n return False\n\n if self.file_ext in ['xls', 'xlsx']:\n self.is_excel = True\n\n return True", "def verify_file(filename, start=0):\n with open(filename, 'rb') as f:\n find(start)\n file_byte = f.read(1)\n num_bytes = 0\n while file_byte:\n chip_byte = read(type='dec', inc=True)\n if ord(chip_byte) != ord(file_byte):\n err_str = 'Verify error at byte %0X: chip=%0x file=%0x' % (start+num_bytes, ord(chip_byte), ord(file_byte))\n cleanup()\n raise Exception(err_str)\n num_bytes += 1\n file_byte = f.read(1)\n return num_bytes", "def is_valid_file(self, file_path):\n return True", "def has_checksum_file(self):\n return self.checksum_file_path.is_file()", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def check_fd(fd):\n try:\n os.fstat(fd)\n return True\n except OSError as error:\n if error.errno != errno.EBADF:\n raise\n return False", "def test_existing_file_after_assert_error(exist_of_file):\n try:\n assert read_magic_number(exist_of_file)\n except AssertionError:\n print(\"Now lets do check of existing file\")", "def check_magic(self, target: str):\n\t\twith open(target, \"rb+\") as archive:\n\t\t\tmagic = archive.read(4)\n\t\t\tif magic == struct.pack(\"I\", self.magic):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def check_file(filename, force, expected_file_size=1):\n if os.path.exists(filename):\n if force or os.path.getsize(filename) < expected_file_size:\n logger.debug(\" .. Removing old file '%s'.\", filename)\n os.remove(filename)\n return False\n else:\n return True\n return False", "def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def checkIfAllowedToModify(self):\n\n oldBytes = b''\n testFileName = self.MAPSTUDIO + self.inputFiles[0] + '.msb'\n\n with open(testFileName, 'rb') as oldf:\n oldBytes = oldf.read()\n\n # Try writing something to the file\n\n try:\n with open(testFileName, 'wb') as outf:\n outf.write(b'TESTINGIFICANWRITEINTOTHISFILE')\n except:\n return False\n\n # Because apparently for _some_ reason it doesn't throw an error sometimes(?) 
so we confirm if the file was actually modified\n\n newBytes = b''\n with open(testFileName, 'rb') as oldf:\n newBytes = oldf.read()\n\n if (oldBytes == newBytes):\n return False\n\n # Restore the file to normal\n\n with open(testFileName, 'wb') as outf:\n outf.write(oldBytes)\n\n oldBytes = b''\n newBytes = b''\n\n return True", "def test_file_integrity_remove_file_in_case_of_fail():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert not os.path.isfile(test_file_path)", "def _assert_file_is_good(filename):\n\n if not filename:\n return\n\n assert os.path.isfile(filename), filename\n assert os.access(filename, os.R_OK), filename\n assert os.access(filename, os.W_OK), filename", "def does_file_have_416_issue(file_path: str) -> bool:\n with open(file_path, \"rb\") as file_handler:\n file_handler.seek(-1024, os.SEEK_END)\n if b\"416 Requested Range Not Satisfiable\" in file_handler.read():\n return True\n return False" ]
[ "0.7275737", "0.6909501", "0.67795235", "0.64751434", "0.6472965", "0.63858616", "0.6306912", "0.62951386", "0.62945735", "0.62230986", "0.6217393", "0.62134457", "0.61752665", "0.6157496", "0.6143077", "0.6092819", "0.60877305", "0.60756284", "0.6073221", "0.603981", "0.6015154", "0.60067314", "0.5968304", "0.5962967", "0.5950317", "0.5940514", "0.5932133", "0.59293", "0.591876", "0.5889632" ]
0.7284083
0
True if compat flag DEFAULT_PARAMETERS is set
def has_default_parameters(self): return self._compat_flags[0] & (0x1 << 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_defaults(self):\n fparam = FParameter(POSITIONAL_ONLY)\n assert fparam.kind == POSITIONAL_ONLY\n for k, v in FPARAM_DEFAULTS.items():\n assert getattr(fparam, k) == v", "def params_optional(self) -> bool:\n result = True\n if self.no_params:\n # We will return False, because there are no params at all - optional or not.\n return False\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n # We should allow you to print out the options to a YAML file and fill it out like a form.\n # So right now, it will create a long Kubernetes policy, but it will have lots of empty lists that we have to fill out. Oh well.\n if not parameter_details.default_value:\n # if not parameter.default_value and parameter.default_value != [] and parameter.default_value != \"\":\n result = False\n break\n return result", "def test_defaults(self):\n params = DefaultsInterface()\n # make sure from_param_server can be called repeatedly\n params.from_param_server()\n\n self.assertEqual(params.verbosity_param_w_default, 'info')\n\n self.assertEqual(params.int_param_w_default, 1)\n self.assertAlmostEqual(params.double_param_w_default, 1.1)\n self.assertEqual(params.str_param_w_default, \"Hello World\")\n self.assertEqual(params.bool_param_w_default, True)\n self.assertEqual(params.long_param_w_default_int, 1)\n self.assertEqual(params.long_param_w_default_int_str, -1)\n self.assertEqual(params.long_param_w_default_long_string, 9223372036854775807)\n\n self.assertEqual(params.vector_int_param_w_default, [1, 2, 3])\n self.assertEqual(params.vector_double_param_w_default, [1.1, 1.2, 1.3])\n self.assertEqual(params.vector_string_param_w_default, [\"Hello\", \"World\"])\n\n self.assertEqual(params.map_param_w_default, {\"Hello\": \"World\"})\n self.assertEqual(params.enum_int_param_w_default, 1)\n self.assertEqual(params.enum_str_param_w_default, \"One\")", "def use_defaults(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_defaults\")", "def use_defaults(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_defaults\")", "def isDefault(self) -> bool:\n ...", "def isDefault(self) -> bool:\n ...", "def isDefault(self) -> bool:\n ...", "def has_default(self):\n return self._default is not NOT_PROVIDED", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def params_required(self) -> bool:\n if self.no_params or self.params_optional:\n return False\n else:\n return True", "def use_defaults(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"use_defaults\")", "def test_defaults(self):\n varp = VarPositional()\n fparam = self.assert_iterable_and_get_fparam(varp)\n assert fparam.name == 'args'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def default_hparams():\n raise NotImplementedError('Not implemented')", "def _getIsValidParameters(self):\n return True, ''", "def set_default_parameters(self):\n super().set_default_parameters()", "def test_defaults(self):\n vark = VarKeyword()\n name, fparam = self.assert_mapping_and_get_fparam(vark)\n assert name == 'kwargs'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def test_overridable_parameter() -> None:\n param_dict = ParamClass.get_overridable_parameters()\n assert \"name\" in param_dict\n assert 
\"flag\" in param_dict\n assert \"not_flag\" in param_dict\n assert \"seed\" in param_dict\n assert \"number\" in param_dict\n assert \"integers\" in param_dict\n assert \"optional_int\" in param_dict\n assert \"optional_float\" in param_dict\n assert \"tuple1\" in param_dict\n assert \"int_tuple\" in param_dict\n assert \"enum\" in param_dict\n assert \"readonly\" not in param_dict\n assert \"_non_override\" not in param_dict\n assert \"constant\" not in param_dict", "def isVwraysParameters(self):\n return True", "def check_raw_parameters(self):\n # exclude some irrelevant or mandatory parameters\n excluded_keys = (\"cmd\", \"client\", \"resource_group_name\", \"name\")\n # check whether the remaining parameters are set\n # the default value None or False (and other empty values, like empty string) will be considered as not set\n is_changed = any(v for k, v in self.context.raw_param.items() if k not in excluded_keys)\n\n # special cases\n # some parameters support the use of empty string or dictionary to update/remove previously set values\n is_default = (\n self.context.get_cluster_autoscaler_profile() is None and\n self.context.get_api_server_authorized_ip_ranges() is None and\n self.context.get_nodepool_labels() is None and\n self.context.get_nodepool_taints() is None\n )\n\n if not is_changed and is_default:\n reconcilePrompt = 'no argument specified to update would you like to reconcile to current settings?'\n if not prompt_y_n(reconcilePrompt, default=\"n\"):\n # Note: Uncomment the followings to automatically generate the error message.\n option_names = [\n '\"{}\"'.format(format_parameter_name_to_option_name(x))\n for x in self.context.raw_param.keys()\n if x not in excluded_keys\n ]\n error_msg = \"Please specify one or more of {}.\".format(\n \" or \".join(option_names)\n )\n raise RequiredArgumentMissingError(error_msg)", "def params_ok(): \n \n if parameters['details'].lower() in ['true', 'yes', '1']:\n parameters['details'] = True\n elif parameters['details'].lower() in ['false', 'no', '0']:\n parameters['details'] = False\n else:\n print 'unrecognized input for details = %s, so set details=False' % parameters['details']\n parameters['details'] = False\n\n if not parameters['db_tables']:\n parameters['db_tables'] = DB_TABLES\n\n # FIXME ideally, pre-check for tables on hosts here before diving in\n\n return True # params are OK; otherwise, we returned False above", "def testDefaults(self, widget):\n assert isinstance(widget.highlight, PythonHighlighter)\n assert isinstance(widget.parameter_dict, dict)\n assert isinstance(widget.pd_parameter_dict, dict)\n\n assert len(widget.model) == 6\n assert \"filename\" in widget.model.keys()\n assert \"overwrite\" in widget.model.keys()\n assert \"description\" in widget.model.keys()\n assert \"parameters\" in widget.model.keys()\n assert \"pd_parameters\" in widget.model.keys()\n assert \"text\" in widget.model.keys()", "def define_flags():\n define_flag = {\n 'boolean': flags.DEFINE_boolean,\n 'float': flags.DEFINE_float,\n 'integer': flags.DEFINE_integer,\n 'string': flags.DEFINE_string,\n }\n for name, param_spec in six.iteritems(proparams._DEFAULT_PARAMS):\n define_flag[param_spec.flag_type](name, param_spec.default_value, param_spec.description)\n flags.declare_key_flag(name)", "def test_validate_default_kw_only_follows_non_default_kw_only(self):\n fsig = FSignature(\n [forge.kwarg('a', default=None), forge.kwarg('b')],\n __validate_parameters__=False,\n )\n fsig.validate()", "def has_default(self):\r\n return self.default is not 
None", "def has_default_value(self):\n return self.default is not None", "def no_params(self) -> bool:\n result = True\n # Fixing issue #92\n if self.properties.parameters:\n return False\n else:\n return True\n # for parameter in self.properties.parameters:\n # if parameter == \"effect\":\n # continue\n # else:\n # result = False\n # break\n # return result", "def required(self) -> bool:\n return self._default is None", "def only_one_nondefault(args):\n defaults = 0 if args.defaults is None else len(args.defaults)\n if len(args.args) >= 1 and (len(args.args) - defaults <= 1):\n return True\n else:\n return False" ]
[ "0.7427541", "0.6925821", "0.65238214", "0.6483276", "0.6483276", "0.63574094", "0.63574094", "0.63574094", "0.6292153", "0.62191045", "0.61751884", "0.6142402", "0.6056118", "0.60462034", "0.60293055", "0.60282165", "0.6020094", "0.59459364", "0.5905384", "0.5896462", "0.5869404", "0.58252627", "0.58167976", "0.5807344", "0.58062404", "0.5784074", "0.578063", "0.57799816", "0.5771043", "0.57699025" ]
0.8273586
0
get a list of (timestamp, value) tuples, whenever the value changes. The first data point with nonzero timestamp is always included, messages with timestamp = 0 are ignored
def list_value_changes(self, field_name):
    t = self.data['timestamp']
    x = self.data[field_name]
    indices = t != 0  # filter out 0 values
    t = t[indices]
    x = x[indices]
    if len(t) == 0:
        return []
    ret = [(t[0], x[0])]
    indices = np.where(x[:-1] != x[1:])[0] + 1
    ret.extend(zip(t[indices], x[indices]))
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_timestamped_metric_values_as_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__metric_value_list):\n ret_list.append(self.__metric_value_list[i].timestamp.strftime(\"%Y-%m-%d %H:%M:%S\") + \" \" +\n str(self.__metric_value_list[i].value) +\n \"(\" + str(self.__metric_value_list[i].metric_def_ID) + \")\")\n i += 1\n return ret_list", "def _get_datapoints(self, key: str, window_start_timestamp_s: float) -> List[float]:\n\n datapoints = self.data[key]\n\n idx = bisect.bisect(\n a=datapoints,\n x=TimeStampedValue(\n timestamp=window_start_timestamp_s, value=0 # dummy value\n ),\n )\n return datapoints[idx:]", "def get_sensor_changes(self, sensor_id=None):\n if sensor_id:\n changes = self.sensor_changes.get(sensor_id)\n if changes is None:\n return []\n return changes\n return list(itertools.chain(*self.sensor_changes.itervalues()))", "def update_timestamp(vector, data):\r\n vector = vector + data\r\n new_vector = []\r\n # loop through the vector and merged repeated timestamp for the same process by picking the higher value\r\n for i in range(len(vector)):\r\n if new_vector == [] :\r\n new_vector.append(vector[i])\r\n else:\r\n for j in range(len(new_vector)):\r\n if new_vector[j][0] == vector[i][0]:\r\n new_vector[j][1] = max(new_vector[j][1], vector[i][1])\r\n break\r\n elif j == len(new_vector) -1 :\r\n new_vector.append(vector[i])\r\n return new_vector", "def read_value(self, reads=1, _sleep=.1):\n values = []\n Result = namedtuple('Result', ['timestamp', 'fieldvalue', 'fieldname'])\n for count in range(reads):\n if reads > 1:\n sleep(_sleep)\n values.append(Result(datetime.now(), self.node.get_value(), self.clean_string))\n return values", "def get_data(index, row, col, vp_vec):\n time_array = array([])\n value_array = array([])\n last_t = 0\n for vp in vp_vec:\n (time, value) = vp.get_data(index, row, col)\n time_array = hstack([time_array, time + last_t])\n value_array = hstack([value_array, value])\n if (len(time_array) > 0):\n last_t = time_array.max()\n\n return (time_array, value_array)", "def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps", "def timestamps(self):\n return self.source.timestamps[self._time_keep]", "def timestamps(self) -> List[float]:\n return self._timestamps", "def timestamps(self) -> List[float]:\n return self._timestamps", "def timestamps(self) -> List[T]:\n return self._timestamps", "def update_data():\n values = temp_serial_placeholder()\n time = current_time_milli() - __start\n points = [ [time, values[0]], [time, values[1]] ]\n __data.append(points)\n return points", "def getChanges():", "def get_new_values(values):\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values", "def timestamps_sorted_list(self) -> List[int]:\n if len(self._timestamps_sorted_list) == 0:\n # Need to sort\n self._timestamps_sorted_list = sorted(list(self.keys()))\n if len(self._timestamps_sorted_list) > 0:\n self._first_timestamp = self._timestamps_sorted_list[0]\n if len(self._timestamps_sorted_list) > 1:\n self._last_timestamp = self._timestamps_sorted_list[-1]\n return self._timestamps_sorted_list", "def get_value_history(self):\n return self.value_history", "def correct_off_seconds_same_minute(pump_log):\n output = []\n\n for row in pump_log:\n if row[0].second == 0 and row[0] == row[1]:\n row[1] = 
row[1] + datetime.timedelta(seconds=59)\n output.append([row[0], row[1]])\n else:\n output.append([row[0], row[1]])\n\n return output", "def get_change_in_val_over_bin(job, field, t0, t1, context):\n # the duration for which the job is active within the bin (<= bin duration), and the jobs global start time\n time_in_bin, start, _ = get_duration_of_job_within_bin(job, t0, t1)\n\n # the prev val of the field and the time the job had this value\n prev_val, prev_time = get_prev_value_from_cache(job[SpecialClassAds.JOB_ID], field, start, context)\n\n val_change_in_bin = linear_interpolate_value_change(\n prev_time, prev_val,\n context.current_time, job[field],\n time_in_bin)\n\n # TODO: DEBUG ###################################################################################################################################################################################################\n if (val_change_in_bin < 0):\n print \"STOP THE PRESSES! VAL CHANGE IN BIN IS NEGATIVE!\"\n print val_change_in_bin\n print \"job\", stringify(job)\n print \"field\", field\n print \"bin start\", t0\n print \"bin end\", t1\n print \"time in bend\", time_in_bin\n print \"job start\", start\n print \"prev time\", prev_time\n print \"prev val\", prev_val\n print \"current time\", context.current_time\n exit()\n\n\n return [val_change_in_bin, time_in_bin]", "def get_list(self):\n if self.key == 'L':\n return array_to_list([self.key, self.timing, self.data])\n if self.key == 'T':\n return array_to_list([self.key, self.data, self.timing])\n tmp_data = copy.deepcopy(self.data)\n for i in range(len(self.data)):\n if isinstance(self.data[i], float):\n tmp_data[i] = str('%.3f' % tmp_data[i])\n if tmp_data[i].split('.')[1] == '000':\n tmp_data[i] = tmp_data[i].split('.')[0]\n return array_to_list([self.key, self.easing, self.timing, tmp_data])", "def recorded_messages(self):\n messages = []\n for time in sorted(self.reception_records):\n messages.extend(self.reception_records[time])\n return messages", "def get_history(self):\n msg_ids = self._records.keys()\n # Remove any that do not have a submitted timestamp.\n # This is extremely unlikely to happen,\n # but it seems to come up in some tests on VMs.\n msg_ids = [m for m in msg_ids if self._records[m]['submitted'] is not None]\n return sorted(msg_ids, key=lambda m: self._records[m]['submitted'])", "def get_timestamps(self, dataset_name):\n return get_timestamps(self, dataset_name)", "def get_monitored_changes(self) -> List:\n pass", "def get_record_timestamps(records):\n if isinstance(records[0], MessageRecord):\n # UserMessageRecords generated before or after actual recording to disk,\n # or during a pause in recording, have a misleading TimeStamp of 0, so\n # instead use the DateTime for sorting MessageRecords in temporal order\n ts = np.asarray([ record.DateTime for record in records ])\n return ts\n try:\n ts = np.asarray([ record.TimeStamp for record in records ])\n except AttributeError:\n ts = np.asarray([ record['TimeStamp'] for record in records ])\n return ts", "def _get_packet_intervals(\n packets: Sequence[Packet],\n node: int,\n getter: Callable[[Packet, int], float]\n) -> np.ndarray:\n prev_time = 0.0\n intervals = []\n for packet in packets:\n if packet.was_served[node]:\n new_time = getter(packet, node)\n intervals.append(new_time - prev_time)\n prev_time = new_time\n return np.asarray(intervals)", "def events(time):\n\n event_list = eventlist()\n idx = np.all(time == event_list[:, 0:len(time)], axis=1)\n return event_list[idx,:]", "def _valueList(self, 
key, year, month=None, day=None, hour=None, status='1', metaData=None):\n vals = [self.timestamp]\n if (key is not None):\n vals.append(\"'{}'\".format(key))\n if (year is not None):\n vals.append(str(year))\n if (month is not None):\n vals.append(str(month))\n if (day is not None):\n vals.append(str(day))\n if (hour is not None):\n vals.append(str(hour))\n if (status is not None):\n vals.append(str(status))\n if (metaData is not None):\n vals.append(\"'{}'\".format(meta))\n\n return '(' + ', '.join(vals) + ')'", "def timeStamps(dataset):\n \n timestamps = []\n \n for index, row in enumerate(dataset):\n try:\n timeObj = datetime.datetime.strptime(timeStampFix(row), '%y:%j:%H:%M:%S')\n except ValueError:\n print('Failed to create datetime object for ' + timeStampFix(row))\n timestamps.append(timeObj)\n \n return timestamps", "def group_consecutive_values(values, threshold=2):\n run = []\n result = [run]\n last = values[0]\n for v in values:\n if v-last <= threshold:\n run.append(v)\n else:\n run = [v]\n result.append(run)\n last = v\n return result", "def event_based_log_a(value):\n\n # significant difference between readings\n threshold = 0.1\n\n # if the readings are steady, we want to measure slowly\n regular_meas_interval = \"00:15:00\"\n\n # if there is a significant difference, we want to measure more frequently\n fast_meas_interval = \"00:00:10\"\n\n # how often to log regardless of changes in readings\n minimum_logging_interval_sec = sl3_hms_to_seconds(\"04:00:00\")\n\n # grab the previously logged value\n prev = prev_logged_value(index())\n if prev.quality == 'G':\n diff = abs(prev.value - value)\n if diff >= threshold:\n # log this value and increase the measurement interval\n meas_log_this()\n setup_write(\"M{} meas interval\".format(index()), fast_meas_interval)\n else:\n # slow down the measurement interval\n setup_write(\"M{} meas interval\".format(index()), regular_meas_interval)\n\n # if we have not logged in a while. log now\n if (time_scheduled() - prev.time) >= minimum_logging_interval_sec:\n meas_log_this()\n else:\n meas_do_not_log()\n\n return value" ]
[ "0.5824866", "0.5726083", "0.56159997", "0.54942083", "0.54706985", "0.54493475", "0.5436982", "0.54328585", "0.5425895", "0.5425895", "0.5419897", "0.5354519", "0.53296113", "0.5266226", "0.52593285", "0.5251129", "0.52483207", "0.5224536", "0.5218457", "0.52090126", "0.51948524", "0.5155354", "0.5135135", "0.51217085", "0.51142997", "0.51022536", "0.5102075", "0.5097073", "0.50283587", "0.50271136" ]
0.77215576
0
add a _MessageParameterDefault object
def _add_parameter_default(self, msg_param):
    default_types = msg_param.default_types
    while default_types:
        # iterate over each bit
        def_type = default_types & (~default_types+1)
        default_types ^= def_type
        def_type -= 1
        if def_type not in self._default_parameters:
            self._default_parameters[def_type] = {}
        self._default_parameters[def_type][msg_param.key] = msg_param.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def create_object_parameter_from_default(obj, default):\n values = []\n if default.enum:\n for v in DefaultParameterVl.objects.filter(parameter=default).all():\n values.append({'value' : v.value,\n 'caption' : v.caption})\n return create_object_parameter(obj, 'user', False,\n tp = default.tp,\n name=default.name,\n descr=default.descr,\n values=values)", "def set_default_parameters(self):\n super().set_default_parameters()", "def parameter(self, name, doc, default = None):\n self._parameters.append((name, doc.strip(), default))\n return self", "def add_parameter(self, paramId, dataType, default, valRange=None, label=\"Parameter\"):\n self.inputs[paramId] = {\n 'label': label,\n 'entry': None,\n 'value': None,\n 'valRange': valRange,\n 'dataType': dataType,\n 'default': default\n }", "def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params", "def default_hparams():\n raise NotImplementedError('Not implemented')", "def _register_global_params(self, params):\n\n for name,obj in self.params().items():\n global_params.add(**{name:obj})\n\n for name,val in params.items():\n global_params.params(name).default=val\n\n params.update(global_params.get_param_values())\n params[\"name\"]=self.name", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def test_defaults(self):\n fparam = FParameter(POSITIONAL_ONLY)\n assert fparam.kind == POSITIONAL_ONLY\n for k, v in FPARAM_DEFAULTS.items():\n assert getattr(fparam, k) == v", "def _default_value(self, addr, size, name=None, inspect=True, events=True, key=None, **kwargs):\n pass", "def default_parameters():\n return BackendNSParameters()", "def argument(arg, default):\n return \"{0}={1}\".format(arg, default) if default else arg", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def bind_with_defaults(self, *args, **kwargs):\n\t\targs = super().bind_partial(*args, **kwargs).arguments\n\t\targs.update({elem: self.defaults[elem] if elem in self.defaults else self.default\n\t\t\t\t\t\t\tfor elem in sig.parameters if elem not in args})\n\t\treturn super().bind(**args)", "def update_param(param, param_dict, alg=\"IID_LINEAR\", prefix=\"\"):\n default_len = len(param.defaults)\n if param.defaults:\n for index, value in enumerate(reversed(param.args)):\n if value not in [\"self\", \"W\", \"method\", \"causal_matrix\", \"topology_matrix\"]:\n if index < default_len:\n p_value = list(reversed(param.defaults))[index]\n else:\n p_value = None\n if value is \"sem_type\":\n p_value = sem_type_set(\"sem_type\", alg)[0]\n param_dict.update({prefix + value: p_value})", "def addDefault(self, name, object):\n if name is None:\n raise ValueError(\"Name cannot be None\")\n self.defaultChoice = name\n self.addObject(name, object)", "def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }", "def _add_setup_param(self, name, fmt, **kwargs):\n self._param_dict.add(name, '', None, fmt,\n startup_param=False,\n 
direct_access=True,\n visibility=ParameterDictVisibility.READ_ONLY,\n **kwargs)", "def f_default(self, default = 1) :\n pass", "def test_defaults(self):\n params = DefaultsInterface()\n # make sure from_param_server can be called repeatedly\n params.from_param_server()\n\n self.assertEqual(params.verbosity_param_w_default, 'info')\n\n self.assertEqual(params.int_param_w_default, 1)\n self.assertAlmostEqual(params.double_param_w_default, 1.1)\n self.assertEqual(params.str_param_w_default, \"Hello World\")\n self.assertEqual(params.bool_param_w_default, True)\n self.assertEqual(params.long_param_w_default_int, 1)\n self.assertEqual(params.long_param_w_default_int_str, -1)\n self.assertEqual(params.long_param_w_default_long_string, 9223372036854775807)\n\n self.assertEqual(params.vector_int_param_w_default, [1, 2, 3])\n self.assertEqual(params.vector_double_param_w_default, [1.1, 1.2, 1.3])\n self.assertEqual(params.vector_string_param_w_default, [\"Hello\", \"World\"])\n\n self.assertEqual(params.map_param_w_default, {\"Hello\": \"World\"})\n self.assertEqual(params.enum_int_param_w_default, 1)\n self.assertEqual(params.enum_str_param_w_default, \"One\")", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ModifyParametersRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.major_ax is None:\n self.major_ax = 0.\n if self.minor_ax is None:\n self.minor_ax = 0.\n if self.coup_strength is None:\n self.coup_strength = 0.\n if self.limit_cycle is None:\n self.limit_cycle = 0.\n if self.forward_velocity is None:\n self.forward_velocity = 0.\n if self.curvature is None:\n self.curvature = 0.\n if self.x_offset is None:\n self.x_offset = []\n if self.y_offset is None:\n self.y_offset = []\n if self.coupling_1 is None:\n self.coupling_1 = []\n if self.coupling_2 is None:\n self.coupling_2 = []\n if self.coupling_3 is None:\n self.coupling_3 = []\n if self.coupling_4 is None:\n self.coupling_4 = []\n if self.coupling_5 is None:\n self.coupling_5 = []\n if self.coupling_6 is None:\n self.coupling_6 = []\n else:\n self.major_ax = 0.\n self.minor_ax = 0.\n self.coup_strength = 0.\n self.limit_cycle = 0.\n self.forward_velocity = 0.\n self.curvature = 0.\n self.x_offset = []\n self.y_offset = []\n self.coupling_1 = []\n self.coupling_2 = []\n self.coupling_3 = []\n self.coupling_4 = []\n self.coupling_5 = []\n self.coupling_6 = []", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('total_volume', 5000.0) # Not important for non-closed loop. 
Included for compatibility.\n\n prm.add('venous_pressure', float())\n\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n return prm", "def param(default: Any = None, name: Optional[str] = None,\n stream_suffix: bool = False, init: bool = True, skip: bool = False,\n render: Optional[Callable[[Any], Any]] = None\n ) -> Any:\n metadata = {\n 'name': name,\n 'stream_suffix': stream_suffix,\n 'skip': skip,\n 'render': render\n }\n if callable(default):\n return field(default_factory=default, init=init, metadata=metadata)\n else:\n return field(default=default, init=init, metadata=metadata)", "def _get_default_parameters(new_values):\n no_default = [\"BEAM\", \"TYPE\", \"ERRORDEF\", \"CORRECTIONS\"]\n\n not_found = [nf for nf in no_default if nf not in new_values]\n if any(not_found):\n raise ValueError(\"Required parameters '{}' not found.\".format(not_found))\n\n # Some defaults\n default = {\n # Beam Parameters\n \"QX\": \"62.31\",\n \"QY\": \"60.32\",\n \"CHROMX\": \"3\",\n \"CHROMY\": \"3\",\n # Settings\n \"USETHIN\": \"1\",\n \"ARCERRORS\": \"0\",\n \"CALCCORRECTIONS\": \"1\",\n # Outputs\n \"NOMINALMACHINE\": \"\",\n \"ARCAPPLIED\": \"\",\n \"MQXAPPLIED\": \"\",\n \"MBIPAPPLIED\": \"\",\n \"ALLAPPLIED\": \"\",\n \"CORRECTED\": \"\",\n }\n\n # crossing angles and separation bumps\n for idx in [1,2,5,8]:\n for prefix in [\"XING\", \"SEP\", \"PHI\"]:\n default[\"{:s}{:d}\".format(prefix, idx)] = \"0\"\n\n # applied errors\n for idx in range(1, 12):\n for orientation in [\"A\", \"B\"]:\n default[\"{:s}{:d}\".format(orientation, idx)] = \"0\"\n\n # return dictionary filled with defaults and new values\n default.update(new_values)\n return default", "def __init__(self, customMessage=\"custom message\"):\n self.customMessage = customMessage", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def test_default_argument(self):\n @converters.wrap\n def inner_test(param: int = 5):\n \"\"\"Make sure the default was used.\"\"\"\n self.assertEqual(param, 5)\n inner_test()", "def addParameter(cTag, name, value): #@NoSelf" ]
[ "0.6885487", "0.6562944", "0.6430674", "0.624202", "0.6017792", "0.6001936", "0.5993875", "0.5911069", "0.5879578", "0.58791137", "0.58579123", "0.58325547", "0.5811331", "0.5731214", "0.5731214", "0.5729596", "0.5720812", "0.57182", "0.57125837", "0.56727165", "0.5632015", "0.5628203", "0.5617232", "0.55754626", "0.55649173", "0.5559012", "0.5556806", "0.55543476", "0.5517585", "0.5506984" ]
0.81283206
0
add a message info multiple to self._msg_info_multiple_dict
def _add_message_info_multiple(self, msg_info):
    if msg_info.key in self._msg_info_multiple_dict:
        if msg_info.is_continued:
            self._msg_info_multiple_dict[msg_info.key][-1].append(msg_info.value)
        else:
            self._msg_info_multiple_dict[msg_info.key].append([msg_info.value])
    else:
        self._msg_info_multiple_dict[msg_info.key] = [[msg_info.value]]
    self._msg_info_multiple_dict_types[msg_info.key] = msg_info.type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg_info_multiple_dict(self):\n return self._msg_info_multiple_dict", "def add_info(self, info_message: dict):\n self.info.update(info_message)", "def _add_info(self, msg, **kwargs):\n\n args, extensions = self._filter_args(msg, **kwargs)\n for key, val in args.items():\n setattr(msg, key, val)\n\n if extensions:\n if msg.extension_elements:\n msg.extension_elements.extend(extensions)\n else:\n msg.extension_elements = extensions", "def massage_addinfo(self) -> str:\n self.message_str= \"{}, {}\\n\".format(self.sent_by, self.time)", "def _add_info(self, infos: dict, info: dict, env_num: int) -> dict:\n for k in info.keys():\n if k not in infos:\n info_array, array_mask = self._init_info_arrays(type(info[k]))\n else:\n info_array, array_mask = infos[k], infos[f\"_{k}\"]\n\n info_array[env_num], array_mask[env_num] = info[k], True\n infos[k], infos[f\"_{k}\"] = info_array, array_mask\n return infos", "def massage_addinfo(self) -> str:\n self.message_str = f'{self.time}\\n{self.sent_by}\\n'\n return self.message_str", "def _addFinalInfoMsg():\n controller.MESSAGES.append(output_messages.INFO_LOG_FILE_PATH%(logFile))\n controller.MESSAGES.append(output_messages.INFO_LOGIN_USER)\n controller.MESSAGES.append(output_messages.INFO_ADD_USERS)", "def msg_info_dict(self):\n return self._msg_info_dict", "def MultiMessage(self, *args, **kwargs):\n pass", "def _set_search_additional_message(self):\n if self._report_data and self._report_data['details']:\n for detail in self._report_data['details']:\n messages = []\n has_exempt_note: bool = False\n if detail.get('location'):\n if detail['location'].get('leaveProvince') and detail['status'] == 'EXEMPT':\n messages.append({'messageType': 'OUT_PROV'})\n if detail.get('description') and detail['description'].get('sections'):\n sections = detail['description'].get('sections')\n for section in sections:\n if section.get('widthFeet', 0) >= 16:\n messages.append({'messageType': 'WIDTH'})\n break\n if detail.get('notes'):\n for note in detail['notes']:\n if detail['status'] == 'CANCELLED' and note.get('documentType', '') == 'REGC':\n messages.append({'messageType': 'REGC'})\n elif note.get('documentType', '') in ('EXRS', 'EXNR') and note.get('createDateTime'):\n has_exempt_note = True\n message = {\n 'messageType': note.get('documentType'),\n 'messageId': note.get('documentRegistrationNumber', ''),\n 'messageDate': Report._to_report_datetime(note['createDateTime'], False)\n }\n messages.append(message)\n if not has_exempt_note and detail.get('status') == 'EXEMPT':\n message = {'messageType': 'EXEMPT'}\n messages.append(message)\n if messages:\n detail['messages'] = messages", "def add_message(messages, key, message):\n if key in messages:\n messages[key].append(message)\n else:\n messages[key] = [message]", "def addInfo(self, **data):\n for key, value in viewitems(data):\n # assumption: value is not iterable (list, dict, tuple, ...)\n # using unicode sandwich pattern\n key = decodeBytesToUnicode(key, \"ignore\")\n value = decodeBytesToUnicode(value, \"ignore\")\n self.data[key] = value\n return", "def set_info_message(msg):\n set_message(msg, TYPE_INFO)", "def store_img_infos(self, msg):\n # msg is technically a ConsumerRecord that is a collections.namedtuple, see:\n # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/fetcher.py#L30\n strk = str(msg['sha1'])\n self.dict_sha1_infos[strk] = dict()\n for key in msg:\n # dumps json of 'img_info'\n # We actually need that only for DIG...\n if key == \"img_info\":\n self.dict_sha1_infos[strk][key] 
= json.dumps(msg[key])\n else:\n # discard 'img_buffer' (if it exists?...), and 'sha1'\n # if k != \"img_buffer\" and k != \"sha1\":\n # self.dict_sha1_infos[strk][k] = msg[k]\n # discard 'sha1'\n if key != \"sha1\":\n self.dict_sha1_infos[strk][key] = msg[key]", "def add_info(transmogrifier, category, section, info):\n try:\n a = transmogrifier._collected_info\n except AttributeError:\n a = []\n transmogrifier._collected_info = a\n a.append({'category': category,\n 'section': section,\n 'info': info,\n })", "def handle_message(self, msg):\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })", "def addMessage():\n\ttmp = [[] for x in GLOBALS.SUPER_PEER_LIST]\n\tglobal response\n\tglobal num\n\tif num != 0:\n\t\tresponse.append([])\n\tfor x in GLOBALS.SUPER_PEER_LIST:\n\t\tresponse[num].append(tmp)\n\tnum +=1", "def _add_details(self, info):\n for (key, val) in six.iteritems(info):\n if key == \"nodes\":\n val = [Node(parent=self, **nd) for nd in val]\n elif key == \"sessionPersistence\":\n val = val['persistenceType']\n elif key == \"cluster\":\n val = val['name']\n elif key == \"virtualIps\":\n key = \"virtual_ips\"\n val = [VirtualIP(parent=self, **vip) for vip in val]\n setattr(self, key, val)", "def info(self, dict):\n self._info.update(dict)", "def message_info(self, m):\n self.message(m, logging.INFO)", "def massage_addinfo(self) -> str:\n super().massage_addinfo()\n self.message_str += f'{self.location}\\n'", "def add_infos(self, *keyvals):\r\n infos = dict(keyvals)\r\n with open(self._info_file, 'a') as outfile:\r\n for key, val in infos.items():\r\n key = key.strip()\r\n val = str(val).strip()\r\n if ':' in key:\r\n raise Exception, 'info key must not contain a colon'\r\n outfile.write('%s: %s\\n' % (key, val))\r\n self._info[key] = val\r\n return infos", "def create_message_event(self, messaging):\n event = {**self.default_event}\n event['entry'][-1]['messaging'].append(messaging)\n return event", "def add_message(self, msg):\n self.messages.append(msg)", "def append_message(self, message):\n if message['message_id'] in self.message_ids:\n return\n self.message_ids.append(message['message_id'])\n self.messages.append(message)", "def _add_message(self, message, level, code=None):\n\n timestamp = str(datetime.datetime.now().isoformat())\n pid = os.getpid()\n self.messages.append( { 'timestamp': timestamp, 'level': self.level_names[level], 'code': code, 'message': message } )\n self.n_messages += 1\n\n # Create a pretty printable message prefix\n prefix = f\"{timestamp} {self.level_names[level]}: ({pid}) \"\n if code is not None:\n prefix += f\"[{code}] \"\n\n if self.output is not None:\n if self.output == 'STDOUT':\n print(f\"{prefix}{message}\", flush=True)\n if self.output == 'STDERR':\n eprint(f\"{prefix}{message}\", flush=True)", "def add_message_by_shared_flight_data(self, shared_flight_data):\n self.message_list.append(shared_flight_data.get(\"ATC\").get_message())", "def add_info(info):\n self._info[get_test_name(request)].append(info)", "def add_info_string(self, key_addr, info):\n\n if key_addr not in self._role_strings_info:\n self._role_strings_info[key_addr] = []\n if info not in self._role_strings_info[key_addr]:\n self._role_strings_info[key_addr].append(info)", "def __init__(self, users, messages):\n self.users = users\n self.messages = messages\n # addd dict with each user as key and 
there corresponidn gmessages" ]
[ "0.7767071", "0.6895153", "0.6687446", "0.6489464", "0.6157023", "0.59883577", "0.58090824", "0.576598", "0.57619935", "0.57111245", "0.56639373", "0.56390464", "0.55331975", "0.55028325", "0.55022395", "0.54579544", "0.5442484", "0.54279864", "0.53858125", "0.53597647", "0.52809423", "0.5272721", "0.5255705", "0.5225815", "0.5193099", "0.5188064", "0.5184503", "0.5180435", "0.5172112", "0.51609325" ]
0.87783754
0
read the file from a given location until the end of sync_byte sequence is found or an end condition is met (reached EOF or searched all last_n_bytes).
def _find_sync(self, last_n_bytes=-1):
    sync_seq_found = False
    initial_file_position = self._file_handle.tell()
    current_file_position = initial_file_position
    search_chunk_size = 512  # number of bytes that are searched at once

    if last_n_bytes != -1:
        current_file_position = self._file_handle.seek(-last_n_bytes, 1)
        search_chunk_size = last_n_bytes

    chunk = self._file_handle.read(search_chunk_size)
    while len(chunk) >= len(ULog.SYNC_BYTES):
        current_file_position += len(chunk)
        chunk_index = chunk.find(ULog.SYNC_BYTES)
        if chunk_index >= 0:
            if self._debug:
                print("Found sync at %i" % (current_file_position - len(chunk) + chunk_index))
            # seek to end of sync sequence and break
            current_file_position = self._file_handle.seek(current_file_position - len(chunk)\
                + chunk_index + len(ULog.SYNC_BYTES), 0)
            sync_seq_found = True
            break
        if last_n_bytes != -1:
            # we read the whole last_n_bytes and did not find sync
            break
        # seek back 7 bytes to handle boundary condition and read next chunk
        current_file_position = self._file_handle.seek(-(len(ULog.SYNC_BYTES)-1), 1)
        chunk = self._file_handle.read(search_chunk_size)

    if not sync_seq_found:
        current_file_position = self._file_handle.seek(initial_file_position, 0)
        if last_n_bytes == -1:
            self._has_sync = False
            if self._debug:
                print("Failed to find sync in file from %i" % initial_file_position)
        else:
            if self._debug:
                print("Failed to find sync in (%i, %i)" %\
                    (initial_file_position - last_n_bytes, initial_file_position))
    else:
        # declare file corrupt if we skipped bytes to sync sequence
        self._file_corrupt = True
    return sync_seq_found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_until(self, c, chunk_size=96):\n s = io.BytesIO()\n fp = self._fp\n eof = False\n\n while True:\n chunk = fp.read(chunk_size)\n\n if not chunk:\n # The end of the file was reached. We'll bail out of the loop\n # and return everything we've read so far.\n eof = True\n break\n\n i = chunk.find(c)\n\n if i == -1:\n # We didn't find the character. Store the entire chunk.\n s.write(chunk)\n else:\n # We found the character. Store everything up to and including\n # it, and then go back in the stream for the next read.\n s.write(chunk[:i + 1])\n fp.seek(i + 1 - len(chunk), os.SEEK_CUR)\n break\n\n result = s.getvalue()\n s.close()\n\n return result, eof", "def read_until(steg_bytes: bytes, offset: int, ending: str):\r\n # Create a variable to hold the bytes read\r\n bytes_read = b\"\"\r\n\r\n # Loop through the steg_bytes\r\n while offset < len(steg_bytes):\r\n # Check if the current byte is the ending byte sequence\r\n if steg_bytes[offset:offset + len(ending)] == ending.encode():\r\n # Return the bytes read and the offset of the ending byte sequence\r\n return bytes_read, offset\r\n # Read the next byte\r\n bytes_read += steg_bytes[offset:offset + 1]\r\n offset += 1", "def _readuntil(f, end=_TYPE_END):\n\tbuf = bytearray()\n\twhile True:\n\t\tbyte = f.read(1)\n\t\tif byte != end:\n\t\t\tbuf += byte\n\t\telse:\n\t\t\tbreak\n\treturn buf", "async def collect_file(self, file):\n with open(file, \"wb\") as f: # Opening file as writable in bytes\n\n total_bytes = 0\n while True:\n self.reader._eof = False # Force to read\n data = await self.reader.read(1024)\n\n # Get buffer using BytesIO\n chunk = io.BytesIO(data)\n\n total_bytes += chunk.getbuffer().nbytes\n\n # last_four = data[:-4].decode(\"utf-8\")\n # last_four = chunk.getvalue()[-1:]\n last_four = chunk.getvalue()\n # print((last_four))\n\n check = \"end\"\n\n # print(check.encode())\n\n if last_four == check.encode():\n print(\"Not Data\")\n break\n\n f.write(chunk.getvalue())\n print(f\"Collected: {total_bytes!r} bytes\")", "def _filesync_read_buffered(self, size, adb_info, filesync_info):\n # Ensure recv buffer has enough data.\n while len(filesync_info.recv_buffer) < size:\n _, data = self._read_until([constants.WRTE], adb_info)\n filesync_info.recv_buffer += data\n\n result = filesync_info.recv_buffer[:size]\n filesync_info.recv_buffer = filesync_info.recv_buffer[size:]\n return result", "def find_file_end(chunks, pos):\n\n\tpos = pos + 1\n\twhile pos < len(chunks)-1:\n\n\t\tif chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:\n\n\t\t\t# This is not a block\n\t\t\treturn pos\n\n\t\telse:\n\t\t\tpos = pos + 1\n\n\treturn pos", "def _read_bytes(self, start, num_bytes):\n with self._fp_lock:\n self._fp.seek(start)\n return self._fp.read(num_bytes)", "async def readchunk(self) -> Tuple[bytes, bool]:\n ...", "def state_readChunk(self, fileObj):\n if not self.reader.next(fileObj, self.destination):\n self.readResponse = self.state_finalStatus", "def _read(self, file_service):\n self.is_reading_lock.acquire()\n while self.is_reading:\n # is_reading_lock guards is_reading which is condition of the while loop.\n # We want to allow is_reading to be changed during the body of the while\n # so we release it in the beginning of the loop.\n self.is_reading_lock.release()\n self.data += self.stream.read(self.chunk_size)\n self.bytes_read = len(self.data)\n\n if self._is_last_chunk(self.bytes_read, self.previous_read):\n file_service.store_bytes(\n self.stream_id, self.data[:self.bytes_read], datetime.now())\n self.data = 
self.data[self.bytes_read:]\n\n elif self.bytes_read >= self.chunk_size:\n file_service.store_bytes(\n self.stream_id, self.data[:self.chunk_size], datetime.now())\n self.data = self.data[self.chunk_size:]\n\n self.previous_read = self.bytes_read\n\n # Acquire the lock before checking is_reading again,\n # so we acquire it at the end of the loop.\n self.is_reading_lock.acquire()\n\n # Store any leftover data\n if len(self.data) != 0:\n file_service.store_bytes(self.stream_id, self.data, datetime.now())\n\n self.is_reading_lock.release()", "def read_until(self, data):\n\n while not data in self.buff:\n self.buff += self.socket.recv(1024)\n \n pos = self.buff.find(data)\n rval = self.buff[:pos + len(data)]\n self.buff = self.buff[pos + len(data):]\n \n return rval", "def read_until(self, data):\n\n while not data in self.buff:\n self.buff += self.socket.recv(1024)\n \n pos = self.buff.find(data)\n rval = self.buff[:pos + len(data)]\n self.buff = self.buff[pos + len(data):]\n \n return rval", "def read_until_eof(self):\n with self.reading:\n try:\n while True:\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n except BrokenPipeError:\n pass\n do_return(self.read_buffer.dequeue())", "def read_until(\n self,\n min_num_bytes: int,\n ending: bytes,\n timeout: float = 10.0,\n data_consumer=None,\n ):\n\n data = b''\n\n # If a miniumum number of bytes is given, wait till at least\n # that number of bytes are received. If the value is 0, then\n # continue, and rely on the terminator and timeout values.\n if min_num_bytes:\n data = self.con.read(min_num_bytes)\n # debug(f'read {data=}')\n if data_consumer:\n data_consumer(data)\n\n timeout_count = 0\n while True:\n if ending and data.endswith(ending):\n break\n else:\n # debug(f\"{ending=} was not found\")\n pass\n\n if self.con.inWaiting() > 0:\n new_data = self.con.read(1)\n # debug(f'read {new_data=}')\n data = data + new_data\n # if len(data) > 80:\n # debug(f'data: len={len(data)} {data[-80:]=}')\n # else:\n # debug(f'data: len={len(data)} {data=}')\n if data_consumer:\n data_consumer(new_data)\n # timeout_count = 0\n else:\n timeout_count += 1\n # debug(f'{timeout_count=}')\n if timeout is not None and timeout_count >= 100 * timeout:\n if not data:\n debug(f\"TIMEOUT - No data received within {timeout} seconds\")\n else:\n debug(f\"TIMEOUT - data {data} did not end with {ending}\")\n break\n time.sleep(0.01)\n debug(f\"read_until returns {data=}\")\n return data", "def read_until(self, data):\n\n while not data in self.buff:\n self.buff += self.socket.recv(1024)\n\n pos = self.buff.find(data)\n rval = self.buff[: pos + len(data)]\n self.buff = self.buff[pos + len(data) :]\n\n return rval", "def _read_nowait(self, n: int) -> bytes:\n ...", "def read_block(file, block_size):\n block = b\"\"\n for i in range(block_size):\n this_byte = file.read(1)\n # If the last block consumed the last char in file:\n if this_byte == b\"\" and i == 0:\n return (-1, False)\n # If we reach EOF prematurely:\n elif this_byte == b\"\":\n block += chr(0).encode()*(block_size - i)\n return (block, False)\n else:\n block += this_byte\n return (block, True)", "def read_until(self, s, timeout=None):\n self.read_cond(lambda x: s in x.buf, timeout)\n end = self.buf.find(s) + len(s)\n res = self.buf[:end]\n self.buf = self.buf[end:]\n return res", "def file_sync_read_chunks(radosobject, chunksize, nr, offset=0):\n radosobject.seek(offset * chunksize)\n while nr:\n remains = chunksize\n chunk = ''\n while 1:\n s = radosobject.sync_read(remains)\n if not s:\n if 
chunk:\n yield chunk\n return\n chunk += s\n remains -= len(s)\n if remains <= 0:\n break\n yield chunk\n nr -= 1", "def read_file(self, general_socket, file_name, file_size):\n\n try:\n with open(os.path.join(self.data, file_name), \"xb\") as file:\n recv_bytes = 0\n while recv_bytes < file_size:\n file_data = general_socket.recv(262144)\n if not file_data: break\n\n recv_bytes += len(file_data)\n progress_bar(self, recv_bytes, file_size, \"Reading File\")\n\n file.write(file_data)\n except socket.error as soe:\n self.LOGGER.info(soe)\n return False, soe\n except Exception as exp:\n self.LOGGER.unknown_error(exp)\n return False, exp\n else:\n return True, True", "async def read_chunk(self, size: int = ...) -> bytes:\n ...", "def variant_call_single_end(sam_file):\n\n\ttotal_reads_number = wccount(sam_file)\n\tpercentage_of_total_file = 0\n\n\tchr_seq = get_ref_geno(chr_name)\n\n\tglobal table_name\n\tcon = lite.connect(db_name)\n\twith con:\n\t\tcur = con.cursor()\n\n\t\tinputfile_sam = open(currentPath + sam_file, \"r\")\n\t\tsam_line_first = inputfile_sam.readline() # the first read line in a pair\n\t\ttotal_reads_num = 0\n\t\tcovered_snp_total_number = 0\n\n\t\tinsert_size_lower_bond = 0\n\t\tinsert_size_upper_bond = 1000\n\n\t\twhile sam_line_first != '':\n\t\t\tif not sam_line_first.startswith(\"@\"):\n\t\t\t\tcurrent_percent = int(float(total_reads_number * percentage_of_total_file) / 100)\n\t\t\t\tif total_reads_num == current_percent:\n\t\t\t\t\tprint \"current progress: \", percentage_of_total_file\n\t\t\t\t\tpercentage_of_total_file += 10\n\n\t\t\t\ttotal_reads_num += 1\n\t\t\t\telements_first = sam_line_first.strip().split()\n\t\t\t\ttry:\n\t\t\t\t\tread_ID_first = elements_first[0].strip()\n\t\t\t\t\tchrName_first = elements_first[2].strip()\n\t\t\t\t\tinsert_size_first = abs(int(elements_first[8].strip())) # insert_size for second read is negative\n\t\t\t\texcept:\n\t\t\t\t\tprint \"error in first read:\", sam_line_first\n\t\t\t\t#print \"this is a new read\"\t\n\t\t\t\tif (insert_size_first >= insert_size_lower_bond) and (insert_size_first <= insert_size_upper_bond):\n\t\t\t\t\tif True:\n\t\t\t\t\t\tif chrName_first.startswith(chr_name):\n\t\t\t\t\t\t\t# first read\n\t\t\t\t\t\t\tqName_first = elements_first[0].strip()\n\t\t\t\t\t\t\tflag_first = elements_first[1].strip()\n\t\t\t\t\t\t\tstart_position_first = int(elements_first[3].strip())\n\t\t\t\t\t\t\tread_sequence_first = elements_first[9].strip()\n\t\t\t\t\t\t\tread_length_first = len(read_sequence_first)\n\t\t\t\t\t\t\tquality_score_sequence_first = elements_first[10].strip()\n\n\t\t\t\t\t\t\tif len(read_sequence_first)\t== len(quality_score_sequence_first):\n\t\t\t\t\t\t\t\tfor i in range(read_length_first):\n\t\t\t\t\t\t\t\t\tcurrent_base_position = start_position_first + i\n\t\t\t\t\t\t\t\t\tA_depth = 0\n\t\t\t\t\t\t\t\t\tT_depth = 0\n\t\t\t\t\t\t\t\t\tC_depth = 0\n\t\t\t\t\t\t\t\t\tG_depth = 0\n\n\t\t\t\t\t\t\t\t\tcovered_snp = read_sequence_first[i] # ith position is the covered snp\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tquality_score_symbol = quality_score_sequence_first[i]\n\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\tprint \"error in\", sam_line_first\n\t\t\t\t\t\t\t\t\t\tquality_score_symbol = 'N'\n\t\t\t\t\t\t\t\t\tif (not covered_snp == 'N') and (\n\t\t\t\t\t\t\t\t\t\t(ord(quality_score_symbol) - 33) > quality_score_threshold): # check quality_score\n\t\t\t\t\t\t\t\t\t\tif covered_snp == \"A\":\n\t\t\t\t\t\t\t\t\t\t\tA_depth += 1\n\t\t\t\t\t\t\t\t\t\telif covered_snp == 
\"T\":\n\t\t\t\t\t\t\t\t\t\t\tT_depth += 1\n\t\t\t\t\t\t\t\t\t\telif covered_snp == \"C\":\n\t\t\t\t\t\t\t\t\t\t\tC_depth += 1\n\t\t\t\t\t\t\t\t\t\telif covered_snp == \"G\":\n\t\t\t\t\t\t\t\t\t\t\tG_depth += 1\n\n\t\t\t\t\t\t\t\t\t\tcur.execute(\"SELECT * from \" + table_name + \" where position=\" + str(\n\t\t\t\t\t\t\t\t\t\t\tcurrent_base_position))\n\t\t\t\t\t\t\t\t\t\trow = cur.fetchone()\n\t\t\t\t\t\t\t\t\t\tif row == None:\n\t\t\t\t\t\t\t\t\t\t\tinset_querry = \"INSERT INTO \" + table_name + \\\n\t\t\t\t\t\t\t\t\t\t\t \" (position, chr, ref_allele, A_depth, T_depth, C_depth, G_depth ) VALUES (\" + \\\n\t\t\t\t\t\t\t\t\t\t\t str(current_base_position) + \\\n\t\t\t\t\t\t\t\t\t\t\t \",'\" + chrName_first + \"','\" + chr_seq[\n\t\t\t\t\t\t\t\t\t\t\t\t current_base_position - 1] + \"',\" + str(A_depth) + \",\" + str(\n\t\t\t\t\t\t\t\t\t\t\t\tT_depth) \\\n\t\t\t\t\t\t\t\t\t\t\t + \",\" + str(C_depth) + \",\" + str(G_depth) + \")\"\n\t\t\t\t\t\t\t\t\t\t\t#print inset_querry\n\t\t\t\t\t\t\t\t\t\t\tcur.execute(inset_querry)\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tA_depth += int(row[3])\n\t\t\t\t\t\t\t\t\t\t\tT_depth += int(row[4])\n\t\t\t\t\t\t\t\t\t\t\tC_depth += int(row[5])\n\t\t\t\t\t\t\t\t\t\t\tG_depth += int(row[6])\n\t\t\t\t\t\t\t\t\t\t\tupdate_querry = \"UPDATE \" + table_name + \" set A_depth=\" + str(A_depth) + \\\n\t\t\t\t\t\t\t\t\t\t\t \", T_depth=\" + str(T_depth) + \", C_depth=\" + str(\n\t\t\t\t\t\t\t\t\t\t\t\tC_depth) + \", G_depth=\" + \\\n\t\t\t\t\t\t\t\t\t\t\t str(G_depth) + \" where position=\" + str(current_base_position)\n\t\t\t\t\t\t\t\t\t\t\t#print update_querry\n\t\t\t\t\t\t\t\t\t\t\tcur.execute(update_querry)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"different in read length and quality length\", sam_line_first\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint \"first and second read ID do not match\", read_ID_first\n\t\t\tsam_line_first = inputfile_sam.readline()\n\t\tinputfile_sam.close()\n\treturn total_reads_num", "def read(self, filename, byte_offset, length, threadID):\n self.lock.acquire()\n if filename not in self.files_on_disk:\n self.lock.release()\n return \"ERROR: NO SUCH FILE\\n\"\n read_file = self.files_on_disk[filename]\n if(read_file.num_bytes < byte_offset + length):\n self.lock.release()\n return \"ERROR: INVALID BYTE RANGE\\n\"\n else:\n with open(\".storage/\" + filename) as f:\n if self.files_on_disk[filename].type == \"jpg\":\n f.seek(byte_offset*8)\n contents = f.read(length*8)\n else:\n f.seek(byte_offset)\n contents = f.read(length)\n self.lock.release()\n return \"ACK %d\\n%s\\n\" % (length, contents)", "def read_until(self, expected=serial.LF, size=None) -> bytes:\n logger.debug(f\"read_until(expected={expected}, size={size})\")\n return self._con.read_until(expected=expected, size=size)", "def read(self, file_offset: int, buffer):\n bytes_read = 0\n block_to_read = file_offset // self.fs.block_size # local to inode\n # print(block_to_read, file_offset)\n if block_to_read == 0:\n offset_in_block = file_offset\n else:\n offset_in_block = file_offset % block_to_read\n read_buffer = bytearray(self.fs.block_size)\n\n while bytes_read < len(buffer):\n block_addr = self.getDiskAddrOfBlock(self.fs, block_to_read)\n if block_addr == -1: # no more data to read\n break\n\n if self.fs.block_map[block_addr] == 1: # skip if block isn't allocated\n # Get block: check cache first, otherwise read in\n cached_block = self.fs.blockCache.get(block_addr)\n if cached_block != None:\n read_buffer = cached_block\n else:\n 
self.fs.block_device.read_block(block_addr, read_buffer)\n \n # Read to end of block, or read to end of buffer, whichever's shorter\n bytes_to_read = min(self.fs.block_size-offset_in_block, len(buffer)-bytes_read)\n start = bytes_read\n stop = start + bytes_to_read\n read_start = offset_in_block\n read_stop = read_start + bytes_to_read\n # print(start,stop,read_start,read_stop)\n buffer[start:stop] = read_buffer[read_start:read_stop]\n \n bytes_read += bytes_to_read\n # Remaining blocks will be (left-)aligned\n if offset_in_block != 0:\n offset_in_block = 0\n\n block_to_read += 1\n\n return bytes_read", "def readfile(name, outstream, start=0, end=None):", "def readfrom(self, addr: int, nbytes: int, stop: bool = True, /) -> bytes:", "def readfrom(self, addr: int, nbytes: int, stop: bool = True, /) -> bytes:", "def __read_files(self, kind:str, prog:progress=None):\n\t\tself.readed_sectors = 0\n\t\tself.read_elapsed = 0.0\t\n\t\t\n\t\tself.__ipc_send_progress(prog, 0)\n\n\t\tself.filematrix.reset(kind=='dynamic')\n\n\t\twhile not self.filematrix.done():\t\t\n\t\t\tif (self.__check_terminated()):\n\t\t\t\treturn;\t\t\t\n\t\t\t\t\t\t\n\t\t\tfp = self.filematrix.next()\t\n\n\t\t\t####logging.info('read path:' + fp.path + ', size: ' + str(fp.size) + ', seed: ' + str(fp.rand_seed))\n\t\t\t\n\t\t\tif not os.path.exists(fp.folder):\n\t\t\t\traise_error(FileExistsError, myerror.dir_error)\n\n\t\t\tfile_time = 0.0\n\t\t\tstart = time.time()\t\t\t\n\t\t\t\n\t\t\twith iolib.fopen(fp.path, 'rd') as f:\n\t\t\t\tremain = fp.size\n\t\t\t\tfile_time = 0.0\n\t\t\t\tstart = 0.0\n\t\t\t\telapsed = 0.0\t\t\t\t\n\t\t\t\t\n\t\t\t\twhile (remain != 0):\n\t\t\t\t\tchunk_sectors = min(remain, self.max_buff_size)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\texpected = self.__random_chunk_pattern(chunk_sectors, fp.rand_seed)\t\t\t\t\t\n\t\t\t\t\t#expected = self.__next_chunk_pattern(chunk_sectors)\t\n\n\t\t\t\t\tif (self.__check_terminated()):\n\t\t\t\t\t\treturn;\n\n\t\t\t\t\treal, bytesRead, elapsed = iolib.read(512 * chunk_sectors, f)\n\t\t\t\t\tfile_time += elapsed\n\t\t\t\t\t\t\t\n\t\t\t\t\tif (real != expected):\n\t\t\t\t\t\tif (self.__check_terminated()):\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\traise_exception(BaseException, myerror.pattern_error, \"compare error at the file:\" + fp.path)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\tself.readed_sectors += int(bytesRead / 512)\n\t\t\t\t\tremain = remain - chunk_sectors\n\t\t\t\t\n\t\t\t\tself.read_elapsed += file_time\t\t\n\t\t\t\ttime.sleep(0.001)\t\t\n\n\t\t\tself.__ipc_send_progress(prog, self.filematrix.get_progress())", "def read(self, size=-1):\n chunk_index, prefix_size = self._index_pos(self._pos)\n prefixed_buffer = []\n try:\n if size < 0:\n while True:\n prefixed_buffer.append(self._readchunk(chunk_index))\n chunk_index += 1\n else:\n need = prefix_size + size\n while need > 0:\n chunk_data = self._readchunk(chunk_index)\n prefixed_buffer.append(chunk_data[:need])\n need -= len(chunk_data)\n chunk_index += 1\n\n except EOFError:\n # PR#16/18 - support identifying EOF\n # use a read() as a sync from desired position to actual position\n # read(0) can be used as a synchronization call\n dec_eof_position = self._members[-1].start_pos + self._members[-1].isize\n self._pos = dec_eof_position\n if prefixed_buffer:\n # subtracting the data in the EOF Case so the normal path will add it back\n # before the function return to avoid changing the path\n # adding up lengths rather than concatenating here to avoid creating new buffers\n self._pos -= sum([len(x) for x in 
prefixed_buffer]) - prefix_size\n prefixed_buffer = b\"\".join(prefixed_buffer)\n result = prefixed_buffer[prefix_size:]\n self._pos += len(result)\n return result" ]
[ "0.6459113", "0.62749535", "0.6266084", "0.612903", "0.58795774", "0.5834802", "0.58217543", "0.5724661", "0.5703309", "0.5690901", "0.56636", "0.56636", "0.56429064", "0.56224495", "0.56210196", "0.56070936", "0.5563991", "0.55546427", "0.5546318", "0.55455285", "0.5493936", "0.54743534", "0.5462547", "0.5455163", "0.54385513", "0.54368544", "0.542537", "0.542537", "0.54176915", "0.53925234" ]
0.74218076
0
check for data corruption based on an unknown message type in the header; set _file_corrupt flag to true if a corrupt packet is found
def _check_packet_corruption(self, header):
    data_corrupt = False
    if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:
        if not self._file_corrupt and self._debug:
            print('File corruption detected')
        data_corrupt = True
        self._file_corrupt = True
    return data_corrupt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_bad_data():\n get_file_reply(files[2][0], files[2][1])", "def check_headers(df, filename):\n print(\"Checking headers for: \" + filename)\n read_message = \"\"\n\n original_colnames = df.columns.tolist()\n # good_colnames = [\"Marker\",\"Chr\",\"Position\",\"Effect_allele\",\"Other_allele\",\"Beta\",\"SE\",\"Pval\",\"EAF\",\"N\",\"Imputed\",\"Info\",\"Information_type\"]\n\n # Before actually checking the contents header, are there even headers?\n passed = False\n for col in original_colnames:\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n passed = True\n\n # Fail the check if the name column is not found, this is to stop the checks if there is a file without a header\n if not passed:\n # First check whether this is one of the files of Malik, where the columns were missing\n if filename.split('/')[-1].startswith('INTERSTROKE'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"coded_all\", \"noncoded_all\", \"strand_genome\", \"beta\", \"SE\", \"pval\", \"AF_coded_all\", \"n_cases\", \"n_controls\", \"imputed\", \"oevar_imp\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n elif filename.split('/')[-1].startswith('ASGC'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"n_cases\", \"n_controls\", \"coded_all\", \"noncoded_all\", \"AF_coded_all\", \"beta\", \"SE\", \"pval\", \"imputed\", \"info\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n else:\n # print(\"Something went wrong for \" + filename)\n # print(\"Please make sure there are headers in the file and that there is a name/id/marker column\")\n return df, \"NAMECOLCHECK;FAILED\"\n \n # Variable to hold all unknown columns\n unknown_cols = []\n\n # Loop over al colnames and rename it\n for index,col in enumerate(original_colnames):\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n original_colnames[index] = \"Marker\"\n\n elif col.lower().strip() in [\"chromosome\", \"chr\", \"chrom\"]:\n original_colnames[index] = \"Chr\"\n\n elif col.lower().strip() in [\"pos\", \"position\", \"bp\"]:\n original_colnames[index] = \"Position\"\n\n elif col.lower().strip() in [\"effallele\", \"eff_allele\", \"effectallele\", \"effect_allele\", \"coded_all\", \"codedall\", \"allele1\"]:\n original_colnames[index] = \"Effect_allele\"\n\n elif col.lower().strip() in [\"noneffallele\", \"noneff_allele\", \"noneffectallele\", \"noneffect_allele\", \"non_coded_all\", \"noncoded_all\", \"noncodedall\", \"other_allele\", \"otherallele\", \"allele2\"]:\n original_colnames[index] = \"Other_allele\"\n\n elif col.lower().strip() in [\"beta\"]:\n original_colnames[index] = \"Beta\"\n\n elif col.lower().strip() in [\"se\", \"sebeta\", \"stderr\"]:\n original_colnames[index] = \"SE\"\n\n elif col.lower().strip() in [\"p\", \"pval\", \"p-value\"]:\n original_colnames[index] = \"Pval\"\n\n elif col.lower().strip() in [\"eaf\", \"freq1\", \"af_coded_all\", \"effallelefreq\"]:\n 
original_colnames[index] = \"EAF\"\n\n elif col.lower().strip() in [\"n\", \"ntot\", \"n_total\"]:\n original_colnames[index] = \"N\"\n\n elif col.lower().strip() in [\"ncase\", \"ncases\", \"n_case\", \"n_cases\"]:\n original_colnames[index] = \"N_cases\"\n\n elif col.lower().strip() in [\"ncontrol\", \"ncontrols\", \"n_control\", \"n_controls\"]:\n original_colnames[index] = \"N_controls\"\n\n elif col.lower().strip() in [\"imputed\", \"imp\"]:\n original_colnames[index] = \"Imputed\"\n\n elif col.lower().strip() in [\"inf\", \"info\", \"info_rsq\", \"rsqr\"]:\n original_colnames[index] = \"Info\"\n\n elif col.lower().strip() in [\"inf_type\", \"information_type\"]:\n original_colnames[index] = \"Information_type\"\n\n # Not neccesary for the toolkit, but reduce the error messages\n elif col.lower().strip() in [\"strand\", \"strand_genome\"]:\n original_colnames[index] = \"Strand\"\n\n elif col.lower().strip() in [\"oevar_imp\"]:\n original_colnames[index] = \"oevar_imp\"\n\n elif col.lower().strip() in [\"pval.t\"]:\n original_colnames[index] = \"pval.t\"\n\n elif col.lower().strip() in [\"df.t\"]:\n original_colnames[index] = \"df.t\"\n\n elif col.lower().strip() in [\"approxdf\"]:\n original_colnames[index] = \"approxdf\"\n\n elif col.lower().strip() in [\"or\"]:\n original_colnames[index] = \"OR\"\n\n else:\n # print(\"Could not match the string: \" + col)\n # print(\"Please make sure this column is handled correctly in the toolkit\")\n unknown_cols.append(col)\n\n # Change column names\n df.columns = original_colnames\n\n # Write the unknown columns into the fail_reason variable\n if len(unknown_cols) > 0:\n read_message = read_message + \"NAMECOLCHECK;PASSED\" + \" UNRECOGNIZED;\" + ' '.join([str(elem) for elem in unknown_cols])\n else:\n read_message = read_message + \"NAMECOLCHECK;PASSED\"\n\n return df, read_message", "def test_process_optional_header_data_bad_header_length(self):\n with self.assertRaises(ValueError):\n decoder.process_optional_header_data(BytesIO(td.external_timestamp(True)), 3, self.mask)", "def test_bad_control_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n ControlHeaderStruct.parse(data)", "def test_bad_control_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n ControlHeaderStruct.parse(data)", "def file_corruption(self):\n return self._file_corrupt", "def test_bad_ipbus_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n PacketHeaderStruct.parse(data)", "def test_bad_ipbus_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n PacketHeaderStruct.parse(data)", "def process_message(msg):\n global fout\n print(\"received \")\n if len(msg)==200: #is header or end\n msg_in=msg.decode(\"utf-8\",\"ignore\")\n msg_in=msg_in.split(\",,\")\n if msg_in[0]==\"header\": #header\n filename=extract_file_data(msg_in[1])\n file_out=\"copy-\"+filename\n fout=open(file_out,\"wb\") #use a different filename\n\n if msg_in[0]==\"end\": #is it really last packet?\n in_hash_final=in_hash_md5.hexdigest()\n if in_hash_final==msg_in[2]:\n print(\"File copied OK -valid hash \",in_hash_final)\n else:\n print(\"Bad file receive \",in_hash_final)\n return False\n else:\n if msg_in[0]!=\"header\":\n in_hash_md5.update(msg)\n return True\n else:\n return False\n else:\n in_hash_md5.update(msg)\n #msg_in=msg.decode(\"utf-8\",\"ignore\")\n if len(msg) <100:\n print(msg)\n return True", "def _validate_header(self, cleartext_header, decrypted_header):\n import struct\n\n magic_number1 = struct.unpack(\"!I\", 
decrypted_header[:4])[0]\n # file_version = struct.unpack(\"!I\", decrypted_header[4:8])[0]\n # key_stretches = struct.unpack(\"!I\", decrypted_header[8:12])[0]\n magic_number2 = struct.unpack(\"!I\", decrypted_header[12:])[0]\n if (self.__magic_number != magic_number1 or\n self.__magic_number != magic_number2):\n raise DecryptionError()\n if cleartext_header != decrypted_header:\n raise FileCorruptionError()", "def process_message(msg):\r\n print(\"received \")\r\n global bytes_in\r\n if len(msg) == 200: # is header or end\r\n print(\"found header\")\r\n msg_in = msg.decode(\"utf-8\")\r\n msg_in = msg_in.split(\",,\")\r\n print(msg_in)\r\n if msg_in[0] == \"end\": # is it really last packet?\r\n in_hash_final = in_hash_md5.hexdigest()\r\n if in_hash_final == msg_in[2]:\r\n print(\"File copied OK -valid hash \", in_hash_final)\r\n return -1\r\n else:\r\n print(\"Bad file receive \", in_hash_final)\r\n return False\r\n else:\r\n if msg_in[0] != \"header\":\r\n in_hash_md5.update(msg)\r\n return True\r\n else:\r\n return False\r\n else:\r\n bytes_in = bytes_in + len(msg)\r\n in_hash_md5.update(msg)\r\n print(\"found data bytes= \", bytes_in)\r\n return True", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))", "def test_corrupt(logger):\n tamper = actions.tamper.TamperAction(None, field=\"flags\", tamper_type=\"corrupt\", tamper_value=\"R\")\n assert tamper.field == \"flags\", \"Tamper action changed fields.\"\n assert tamper.tamper_type == \"corrupt\", \"Tamper action changed types.\"\n assert str(tamper) == \"tamper{TCP:flags:corrupt}\", \"Tamper returned incorrect string representation: %s\" % str(tamper)\n\n packet = layers.packet.Packet(IP(src=\"127.0.0.1\", dst=\"127.0.0.1\")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags=\"S\"))\n original = copy.deepcopy(packet)\n tamper.tamper(packet, logger)\n\n new_value = packet[TCP].flags\n\n # Must run this check repeatedly - if a scapy fuzz-ed value is not properly\n # ._fix()-ed, it will return different values each time it's requested\n for _ in range(0, 5):\n assert packet[TCP].flags == new_value, \"Corrupted value is not stable\"\n\n # Confirm tamper didn't corrupt anything else in the TCP header\n assert confirm_unchanged(packet, original, TCP, [\"flags\"])\n\n # Confirm tamper didn't corrupt anything else in the IP header\n assert confirm_unchanged(packet, original, IP, [])", "def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s 
contains no version field\" %\n (self._filename))", "def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def _unknown_format(self, format):\n\n raise errors.NotAcceptable('unknown data format: ' + format)", "def is_file_type_error(self):\n return self._tag == 'file_type_error'", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 
'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 
'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 
'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 
'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 
'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 
'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return", "def _leftovers(self, fl):\n try:\n data = self.sock.recv(1024, fl)\n except socket.error as _:\n return False\n if len(data) != 0:\n tail = data\n while True:\n (head, tail) = Ctrl().split_combined(tail)\n print(\"Got message:\", Ctrl().rem_header(head))\n if len(tail) == 0:\n break\n return True\n return False", "def check_record(self, mask, fullpath):\r\n if self.is_carved_gzip:\r\n decode_error = False\r\n # Flag conflicts\r\n # These flag combinations can not exist together\r\n type_err = \"FolderEvent\" in mask[0] and \"FileEvent\" in mask[0]\r\n fol_cr_err = \"FolderEvent\" in mask[0] and \"Created\" in mask[1] and \\\r\n \"FolderCreated\" not in mask[1]\r\n fil_cr_err = \"FileEvent\" in mask[0] and \"FolderCreated\" in mask[1]\r\n lnk_err = \"SymbolicLink\" in mask[0] and \"HardLink\" in mask[0]\r\n h_lnk_err = \"HardLink\" not in mask[0] and \"LastHardLink\" in mask[1]\r\n h_lnk_err_2 = \"LastHardLink\" in mask[1] and \";Removed\" not in mask[1]\r\n n_used_err = \"NOT_USED-0x0\" in mask[1]\r\n ver_error = \"ItemCloned\" in mask[1] and self.dls_version == 1\r\n\r\n # If any error exists return false to caller\r\n if type_err or \\\r\n fol_cr_err or \\\r\n fil_cr_err or \\\r\n lnk_err or \\\r\n h_lnk_err or \\\r\n h_lnk_err_2 or \\\r\n n_used_err or \\\r\n 
decode_error or \\\r\n ver_error:\r\n return False\r\n else:\r\n # Record passed tests and may be valid\r\n # return true so that record is included in output reports\r\n return True\r\n else:\r\n # Return true. fsevent file was not identified as being carved\r\n return True", "def test_error_message_header_bad_match_codes(self):\n\n error_type = 4\n error_type_value = Error.ErrorType.OFPET_BAD_MATCH\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_MATCH_CODE_VALUE or length > 0:\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_MATCH_CODE_VALUE:\n error_code += 1\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def process_headers(fin, fout, fixed_headers):\n filtered_headers = set(item[0] for item in fixed_headers)\n filtered_headers.add(\"SAMPLE\")\n expected_values = {\n name: value for name, asserted, value in fixed_headers if asserted\n }\n errors = False\n for raw_line in fin:\n if raw_line.startswith('##'):\n # TODO: This will break if the metadata header is bad.\n name, value = raw_line[2:].rstrip().split('=', 1)\n if name in filtered_headers:\n if name in expected_values:\n if value != expected_values[name]:\n errors = True\n # TODO: propper logging\n sys.stderr.write(\n 'tcga-vcf-reheader: mismatch {}={}\\n'.format(\n name, value\n )\n )\n else: # Just some other header...\n fout.write(raw_line)\n else:\n break\n fout.write(raw_line) # raw_line should now be the data header line.\n return errors", "def verify(self):\n if len(self.headers) not in [1, 5]:\n raise IncorrectNumberOfExtensions(\"header\", \"5\", self)\n if len(self.pixeldata) not in [1, 2, 3]:\n raise IncorrectNumberOfExtensions(\"pixel\", \"1, 2, or 3\", self)\n if len(self.tabledata) not in [0,4]:\n raise IncorrectNumberOfExtensions(\"table\", \"4\", self)", "def test_bad_ascii_data(self):\n\n # Bad file\n asc_filename = os.path.join(TESTDATA, 'bad_ascii_format.asc')\n try:\n read_layer(asc_filename)\n except ReadLayerError, e:\n # Check that error message is reasonable, e.g.\n # File /home/nielso/sandpit/inasafe_data/test/bad_ascii_format.asc\n # exists, but could not be read. Please check if the file can\n # be opened with e.g. 
qgis or gdalinfo\n\n msg = 'Unexpected error message for corrupt asc file: %s' % e\n assert 'exists' in str(e), msg\n assert 'gdalinfo' in str(e), msg\n assert 'qgis' in str(e), msg\n assert 'Please' in str(e), msg\n\n # No file\n asc_filename = 'nonexisting_ascii_file_234xxxlcrhgqjk.asc'\n try:\n read_layer(asc_filename)\n except ReadLayerError, e:\n # Check that this error message reflects that file did not exist\n msg = 'Unexpected error message for non existing asc file: %s' % e\n assert 'Could not find file' in str(e), msg", "def _check_header(\n self, filename, run_check_acceptability=True, background_lsts=True\n ):\n uvd_file = UVH5()\n with h5py.File(filename, \"r\") as f:\n header = f[\"/Header\"]\n uvd_file._read_header(\n header,\n filename,\n run_check_acceptability=run_check_acceptability,\n background_lsts=background_lsts,\n )\n\n # temporarily remove data, flag, and nsample arrays, so we only check metadata\n if self.data_array is not None:\n data_array = self.data_array\n self.data_array = None\n replace_data = True\n else:\n replace_data = False\n if self.flag_array is not None:\n flag_array = self.flag_array\n self.flag_array = None\n replace_flags = True\n else:\n replace_flags = False\n if self.nsample_array is not None:\n nsample_array = self.nsample_array\n self.nsample_array = None\n replace_nsamples = True\n else:\n replace_nsamples = False\n\n if self != uvd_file:\n raise AssertionError(\n \"The object metadata in memory and metadata on disk are different\"\n )\n else:\n # clean up after ourselves\n if replace_data:\n self.data_array = data_array\n if replace_flags:\n self.flag_array = flag_array\n if replace_nsamples:\n self.nsample_array = nsample_array\n del uvd_file\n return", "def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def test_error_message_header_bad_request_codes(self):\n error_type = 1\n error_type_value = Error.ErrorType.OFPET_BAD_REQUEST\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_BAD_REQUEST_CODE_VALUE or length > 0:\n\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_BAD_REQUEST_CODE_VALUE:\n error_code += 1\n\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def test_corrupt_dataofs(logger):\n packet = layers.packet.Packet(IP(src=\"127.0.0.1\", dst=\"127.0.0.1\")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags=\"S\", dataofs=\"6L\"))\n original = copy.deepcopy(packet)\n tamper = 
actions.tamper.TamperAction(None, field=\"dataofs\", tamper_type=\"corrupt\")\n\n tamper.tamper(packet, logger)\n\n # Confirm tamper actually corrupted the checksum\n assert packet[TCP].dataofs != \"0\"\n new_value = packet[TCP].dataofs\n\n # Must run this check repeatedly - if a scapy fuzz-ed value is not properly\n # ._fix()-ed, it will return different values each time it's requested\n for _ in range(0, 5):\n assert packet[TCP].dataofs == new_value, \"Corrupted value is not stable\"\n\n # Confirm tamper didn't corrupt anything else in the TCP header\n assert confirm_unchanged(packet, original, TCP, [\"dataofs\"])\n\n # Confirm tamper didn't corrupt anything in the IP header\n assert confirm_unchanged(packet, original, IP, [])" ]
[ "0.6176655", "0.59605867", "0.5881001", "0.58625066", "0.58625066", "0.5849321", "0.5756507", "0.5756507", "0.5756501", "0.57418764", "0.56147045", "0.55945456", "0.55350333", "0.55093884", "0.54594165", "0.54587096", "0.5445845", "0.5425486", "0.5398548", "0.5377166", "0.5343827", "0.53378916", "0.533632", "0.5327354", "0.5324229", "0.5321521", "0.5290967", "0.5267165", "0.5253743", "0.524783" ]
0.76328593
0
Creates an array attribute containing array attributes of integers. If the operand is already an array attribute, forwards it. Otherwise treats the operand as a list of attributes or integers, potentially interspersed, to create a new array-of-arrays attribute. Expects the thread-local MLIR context to have been set by the context manager.
def _get_int_int_array_attr( values: Optional[Union[ArrayAttr, Sequence[Union[ArrayAttr, IntOrAttrList]]]] ) -> ArrayAttr: if values is None: return ArrayAttr.get([]) if isinstance(values, ArrayAttr): return values if isinstance(values, list): values = [ ArrayAttr.get( [IntegerAttr.get(IntegerType.get_signless(64), v) for v in value]) for value in values ] return ArrayAttr.get(values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enableAttributeArray(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def setAttributeArray(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def __init__(\n self,\n array: Array,\n ctx: Ctx = None,\n ):\n self._array = array\n super().__init__(\n ctx, lt.Array(ctx if ctx is not None else default_ctx(), array)\n )", "def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray", "def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray", "def __array__(self, *args, **kwargs):\n\n return self.data", "def basic_array_creation():\n print('From normal creation')\n arr: pa.Array = pa.array([1, 2, 3, 4, 5], type=pa.int8())\n print(arr)\n\n print('From pandas series')\n arr: pa.Array = pa.Array.from_pandas(pd.Series([1, 2, 3, 4, 5]))\n print(arr)", "def test_llvm_array2():\n e = LLVMEvaluator()\n e.add_module(\"\"\"\\\n\n%array = type {i64, [3 x i64]}\n\n; Sum the three elements in %a\ndefine i64 @sum3(%array* %a)\n{\n %a1addr = getelementptr %array, %array* %a, i64 0, i32 1, i64 0\n %a1 = load i64, i64* %a1addr\n\n %a2addr = getelementptr %array, %array* %a, i64 0, i32 1, i64 1\n %a2 = load i64, i64* %a2addr\n\n %a3addr = getelementptr %array, %array* %a, i64 0, i32 1, i64 2\n %a3 = load i64, i64* %a3addr\n\n %tmp = add i64 %a2, %a1\n %r = add i64 %a3, %tmp\n\n ret i64 %r\n}\n\n\ndefine i64 @f()\n{\n %a = alloca %array\n\n %idx0 = getelementptr %array, %array* %a, i64 0, i32 0\n store i64 3, i64* %idx0\n\n %idx1 = getelementptr %array, %array* %a, i64 0, i32 1, i64 0\n store i64 1, i64* %idx1\n %idx2 = getelementptr %array, %array* %a, i64 0, i32 1, i64 1\n store i64 2, i64* %idx2\n %idx3 = getelementptr %array, %array* %a, i64 0, i32 1, i64 2\n store i64 3, i64* %idx3\n\n %r = call i64 @sum3(%array* %a)\n ret i64 %r\n}\n\"\"\")\n assert e.intfn(\"f\") == 6", "def new_array(lhs_ast, new_ast):\n assert(new_ast.tag == 'NEW_ARRAY')\n rank = new_ast.rank\n assert(rank > 0)\n type_name = new_ast.type_name\n array_var = checker.new_temp()\n env = {}\n template = '{\\n'\n template += 'var %s%s %s;\\n' % (type_name, '[]'*rank, array_var)\n if rank == 1:\n template += new_array1(new_ast, array_var, env)\n # elif rank == 2:\n # template += new_array2(new_ast, array_var)\n else:\n checker.errors.add(new_ast.coord, 'UNINMP',\n 'array initialization for rank > 1 not implemented')\n return Ast('BLOCK', coord=new_ast.coord)\n env['$lhs'] = lhs_ast\n template += '$lhs = %s;\\n' % array_var\n template += '}\\n'\n return Template.substitute('block', template, env)", "def make_array(self, new_capacity):\n return (new_capacity * ctypes.py_object)()", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def _create_action(value: float) -> types.NestedArray:\n return np.array(value, dtype=np.float64)", "def _make_array(self, c):\n return (c * ctypes.py_object)()", "def _int64_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))", "def _parse_array(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Array, registry_exprs: Mapping[mapry.Class, str],\n auto_id: mapry.py.generate.AutoID, py: mapry.Py) -> str:\n uid = auto_id.next_identifier()\n\n item_parsing = _parse_value(\n value_expr=\"item_{uid}\".format(uid=uid),\n target_expr=\"target_item_{uid}\".format(uid=uid),\n ref_parts=ref_parts + [\"str(i_{uid})\".format(uid=uid)],\n a_type=a_type.values,\n 
registry_exprs=registry_exprs,\n auto_id=auto_id,\n py=py)\n\n return _PARSE_ARRAY_TPL.render(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n uid=uid,\n minimum_size=a_type.minimum_size,\n maximum_size=a_type.maximum_size,\n value_py_type=mapry.py.generate.type_repr(a_type=a_type.values, py=py),\n item_parsing=item_parsing).rstrip('\\n')", "def __new__(\n cls,\n input_array: Union[np.ndarray, Sequence],\n units: Union[Unit, str, None] = None,\n ) -> Any:\n\n arr = np.array(input_array, copy=True).view(cls)\n\n if isinstance(input_array, ValueArray) and units is None:\n arr.units = input_array.units\n else:\n arr.units = _units_init(cls, units)\n\n return arr", "def a(*args, **kwargs):\n return np.array(*args, **kwargs)", "def getattr_array(a: np.array, attr: str) -> np.ndarray:\n return np.vectorize(getattr, otypes=(type(getattr(a.flatten()[0], attr)),))(\n a, arrayobj1d([attr])\n )", "def to_context_mat( iterable, context=FloatContext ):\n to_float = context.from_int\n return [[to_float(x) for x in row] for row in iterable]", "def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))", "def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))", "def __array__(self):\n return self.to_array()", "def __array_wrap__(self, result, **kwargs):\n\n return self.__class__(result, self.shape)", "def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherArray", "def test_op_iadd_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def array_form(self):\n return tuple(self)", "def c_array(ctype, values):\n\n arr = (ctype*len(values))()\n arr[:] = values\n return arr", "def test_op_iadd_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _make_array(self, capacity):\n return (capacity * ctypes.py_object)()", "def __array__(self):\n return self.array" ]
[ "0.5632645", "0.55638945", "0.53828555", "0.53659517", "0.53659517", "0.5316151", "0.53118175", "0.52722055", "0.5270165", "0.5228894", "0.5194302", "0.519273", "0.51908255", "0.51729876", "0.5158492", "0.51555264", "0.51448303", "0.51444167", "0.5142811", "0.5137237", "0.5137237", "0.51262736", "0.5124977", "0.51209515", "0.5119688", "0.510733", "0.5098006", "0.50857204", "0.5079151", "0.5071898" ]
0.6210563
0
Hierarchically cluster the expression profiles.
def cluster(eps, linkage='average'): # TODO: your code here # Start by creating leaves for all the profiles and computing Euclidean distances between each pair. nodes = [ExpressionHierarchicalClusterLeaf(ep) for ep in eps] distances = {} for i in range(len(nodes)): for j in range(i + 1, len(nodes)): dis = sum(k * k for k in nodes[i].ep.values - nodes[j].ep.values) ** 0.5 distances[(nodes[i], nodes[j])] = dis # repeatedly find the closest pair of clusters and merge them into a new inner node that can be used in subsequent iterations. # meanwhile Compute cluster-profile and cluster-cluster distances, allowing the choice of average linkage while len(nodes) > 1: # find the closest pair min_dis = min(distances.values()) node1 = None node2 = None for k, v in distances.items(): if v == min_dis: node1 = k[0] node2 = k[1] # merge merged_node = ExpressionHierarchicalClusterInner(node1, node2) nodes.remove(node1) nodes.remove(node2) # https://stackoverflow.com/questions/11941817/how-to-avoid-runtimeerror-dictionary-changed-size-during-iteration-error for k1, k2 in list(distances.keys()): if k1 == node1 or k2 == node1 or k1 == node2 or k2 == node2: del distances[(k1, k2)] # calculate the new distance form other nodes to the merged node for other_node in nodes: pair_dis = [] for ep1 in other_node.ordered_profiles(): for ep2 in merged_node.ordered_profiles(): pair_dis.append(sum(k * k for k in ep1.values - ep2.values) ** 0.5) if linkage == "average": cluster_dis = sum(pair_dis) / len(pair_dis) elif linkage == "min": cluster_dis = min(pair_dis) else: cluster_dis = max(pair_dis) # update the distance dict distances[(other_node, merged_node)] = cluster_dis # add merged_node nodes.append(merged_node) return nodes[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_hierarchically(active_sites):\n\n\n cls, sc = agglomerative(active_sites)\n\n return cls", "def clusters(self):\n raise NotImplementedError", "def test_get_hyperflex_cluster_profile_list(self):\n pass", "def assign_clusters(self):\n running_perts = {}\n for name in self.tensor_info:\n item = self.tensor_info[name]\n pert_list = item[1]\n pert_names = []\n prob_list = []\n if pert_list is not None:\n for pert in pert_list:\n pert_names.append(pert.__class__.__name__)\n prob_list.append(pert.p)\n pert_names = '_'.join(pert_names)\n if pert_names not in running_perts:\n running_perts[pert_names] = [(name, prob_list)]\n else:\n running_perts[pert_names].append((name, prob_list))\n\n running_perts.pop('')\n\n assert len(running_perts) <= len(self.clusters), \"More different perturbations than clusters available, cannot assign tensors to clusters\"\n\n # ONLY BITWISEPERT FOR THE TIME BEING\n bitwises = running_perts['BitwisePert']\n bitwise_probs = [item[1][0] for item in bitwises]\n centers, _ = kmeans(bitwise_probs, len(self.clusters))\n groups, _ = vq(bitwise_probs, centers)\n\n for tensor, cluster in zip(bitwises, groups):\n name = tensor[0]\n tensor_ref = self.tensor_info[name][0]\n repr = self.tensor_info[name][2]\n self.clusters[cluster].add_tensor(tensor_ref, repr)\n\n for cluster, rate in zip(self.clusters, centers):\n pert_dict = {\n \"name\": \"BitwisePert\",\n \"p\": rate}\n pert = P.construct_pert(pert_dict)\n cluster.set_perturb([pert])", "def constella(cur_plms, pc_starscape, group_iter, outfile_prefix):\n # Copy dataframe to avoid modifying the input dataframe\n cur_plms_copy = cur_plms.copy(deep=True)\n\n sanity_check_pos = 2 # Needs to point at days in image identifier!\n\n singleton_no = pc_starscape.shape[0]\n\n if params.debug is not None:\n print(f'{singleton_no} plms to group')\n\n plm_links = linkage(pc_starscape.loc[:, pc_starscape.columns[2:len(pc_starscape.columns)]].values, 'ward')\n\n # For n-1 to 2 leaves on the current hierarchical cluster dendrogram...\n for c in np.arange(singleton_no - 1, 2, -1):\n # Extract current number of clusters for the agglomeration step\n cutree = cut_tree(plm_links, n_clusters=c)\n # Generate a list of all current clusters identified\n group_list = np.unique(cutree)\n\n # For the current cluster being queried...\n for g in group_list:\n # Create list of current clusters row indices in pandas dataframe\n cur_index = [i for i, x in enumerate(cutree == g) if x]\n # Create list of current clusters present group identity assignments\n cur_index_id = np.array(cur_plms_copy.iloc[cur_index, 0])\n # Are any of the plms in the current cluster unnamed, how many?\n empty_count = np.count_nonzero(cur_index_id == None)\n empty_index = [i for (i, v) in zip(cur_index, cur_plms_copy.iloc[cur_index, 0].values == None) if v]\n # Are any of the plms in the current cluster already assigned an identity, what are those identities?\n unique_ids = np.unique(cur_index_id[np.array(cur_index_id) != None])\n\n # If cluster is two unnamed plms exactly, assign this group their own identity as a pair\n if empty_count == 2:\n pair_names = cur_plms_copy.iloc[empty_index, 1].values\n # Sanity check! 
Pairs must be on different days\n if pair_names[0].split('_')[sanity_check_pos] != pair_names[1].split('_')[sanity_check_pos]:\n cur_plms_copy.iloc[empty_index, 0] = group_iter\n group_iter = group_iter + 1\n else:\n cur_plms_copy.iloc[empty_index[0], 0] = group_iter\n cur_plms_copy.iloc[empty_index[1], 0] = group_iter + 1\n group_iter = group_iter + 2\n\n # For the identities that already exist...\n for uid in unique_ids:\n # If only one plm assigned a name in current cluster and a second unnamed plm exists\n # transfer ID over to create a pair\n if np.count_nonzero(np.array(cur_index_id) == uid) < 2 and empty_count == 1:\n # Store boolean positions for plms with IDs matching current id out of current cluster\n match_ids = [i for i, x in enumerate(cur_plms_copy.iloc[cur_index, 0].values == uid) if x]\n # Store boolean positions for plms which are unnamed out of current cluster\n null_ids = [i for i, x in enumerate(cur_plms_copy.iloc[cur_index, 0].values == None) if x]\n # If exactly 1 matching ID and 1 null ID (i.e. 2 plms total)\n # continue to pass ID name to the unnamed plm\n if len(match_ids) + len(null_ids) == 2:\n # Sanity check! Pairs must be on different days\n pair_names = cur_plms_copy.iloc[[cur_index[i] for i in match_ids + null_ids], 1].values\n if pair_names[0].split('_')[sanity_check_pos] != pair_names[1].split('_')[sanity_check_pos]:\n # Transfer identities to the unnamed plm\n cur_plms_copy.iloc[[cur_index[i] for i in null_ids], 0] = uid\n\n # Now that all groups that can be linked are formed, name rogues...\n rogues = [i for i, x in enumerate(cur_plms_copy.loc[:, 'group'].values == None) if x]\n for rogue in rogues:\n cur_plms_copy.iloc[[rogue], 0] = group_iter\n group_iter = group_iter + 1\n\n grpnames = cur_plms_copy.loc[:, ['group']].values\n plmnames = cur_plms_copy.loc[:, ['plmname']].values\n\n labelnames = []\n\n for li in range(0, len(plmnames)):\n labelnames.append(''.join(plmnames[li] + ' (' + str(int(grpnames[li])) + ')'))\n\n if params.debug is not None:\n plt.figure()\n plt.title('')\n plt.xlabel('')\n plt.ylabel('')\n dendrogram(plm_links, color_threshold=100, orientation=\"left\", leaf_font_size=10, labels=np.array(labelnames))\n plt.tight_layout()\n\n if params.debug == \"print\":\n plt.savefig(outfile_prefix + '_plmHCA.png')\n plt.close()\n elif params.debug == \"plot\":\n plt.show()\n\n return cur_plms_copy, group_iter", "def atlas_clusters():\n pass", "def Clusters(self):\n return", "def test_create_hyperflex_cluster_profile(self):\n pass", "def get_uniprot_clusters():\n\tjson_str = \"\"\n\tfor line in open(gpcr_tree_path, 'r'):\n\t\tjson_str += line \n\tgpcr_tree = json.loads(json_str)\n\n\tuniprot_clusters = []\n\t\n\tfor a in gpcr_tree[\"children\"]:\n\t\tfor b in a[\"children\"]:\n\t\t\tcluster = []\n\t\t\tfor uniprot in b[\"children\"]:\n\t\t\t\tuniprot_id = (str(uniprot['name']) + \"_human\").upper()\n\t\t\t\tcluster.append(uniprot_id)\n\t\t\tuniprot_clusters.append(cluster)\n\n\treturn uniprot_clusters", "def __init__(self, profiles):\n\t\tself.nodes = []\n\t\tfor i, profile in enumerate(profiles):\n\t\t\tuniNode = Node(i, {}, profile, truth = profile.name)\n\t\t\tself.nodes.append(uniNode)\n\t\tself.computeDistances()", "def __create_cluster_profiles(self,\n clustered_dataframes,\n shrunken_df,\n numerical_features,\n le_map,\n output_path,\n find_nearest_on_cols=False,\n show=True):\n\n def find_nearest(numbers, target):\n \"\"\"\n Find the closest fitting number to the target number\n \"\"\"\n numbers = np.asarray(numbers)\n idx = 
(np.abs(numbers - target)).argmin()\n return numbers[idx]\n\n cluster_profiles_df = pd.DataFrame(columns=shrunken_df.columns).drop(\n 'Cluster_Name', axis=1)\n rows_count = 0\n for cluster_identfier, cluster_dataframe in \\\n clustered_dataframes.items():\n df = pd.DataFrame(columns=cluster_dataframe.columns)\n df = df.append(cluster_dataframe.mean(), ignore_index=True)\n df.index = [cluster_identfier]\n\n if cluster_dataframe.shape[0] <= 1:\n continue\n\n # Attempt to convert numbers found within the full set of data\n for col in cluster_dataframe.columns:\n if col not in numerical_features or find_nearest_on_cols:\n df[col] = find_nearest(numbers=shrunken_df[\n col].value_counts().index.tolist(),\n target=df[col].values[0])\n\n # Evaluate cluster dataframe by dataframe\n eval_df = pd.DataFrame(columns=cluster_dataframe.columns)\n eval_df = eval_df.append(\n cluster_dataframe.mean(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.min(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.median(),\n ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.max(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.std(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.var(), ignore_index=True)\n eval_df.index = [\"Mean\", \"Min\", \"Median\",\n \"Max\", \"Standard Deviation\", \"Variance\"]\n\n if show:\n print(\"Total found in {0} is {1}\".format(\n cluster_identfier, cluster_dataframe.shape[0]))\n self.__render_mpl_table(\n df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Means_Rounded_To_Nearest_Real_Numbers\",\n header_columns=0,\n col_width=4.0)\n\n self.__render_mpl_table(\n eval_df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Eval_Df\",\n header_columns=0,\n col_width=4.0)\n display(df)\n display(eval_df)\n self.__vertical_spacing(7)\n\n cluster_profiles_df = cluster_profiles_df.append(\n self.__decode_df(df, le_map))\n\n rows_count += cluster_dataframe.shape[0]\n\n return rows_count, cluster_profiles_df", "def profile_example_cluster(self):\n self.profile_arnaud_bestfit() # Set default shape parameters\n self.M500 = 1e14\n self.r500 = 1.\n self.z = 0.5", "def groupwise_jaccard(\n self,\n profiles: Iterable[Iterable[str]],\n predicate: Optional[URIRef] = RDFS['subClassOf']) -> float:\n # Filter out negative phenotypes\n profile_union = set()\n profile_intersection = set()\n\n is_first = True\n for profile in profiles:\n profile_union = profile_union.union(\n owl_utils.get_profile_closure(\n profile, self.graph, self.root, predicate)\n )\n if is_first:\n profile_intersection = owl_utils.get_profile_closure(\n profile, self.graph, self.root, predicate\n )\n is_first = False\n else:\n profile_intersection = profile_intersection.intersection(\n owl_utils.get_profile_closure(\n profile, self.graph, self.root, predicate\n )\n )\n\n return len(profile_intersection)/len(profile_union)", "def hierarical_clustering(p_df, method=\"average\"):\n pdf_values = p_df.values\n np.fill_diagonal(pdf_values, 0)\n pdf_values_1_d = matrix_to_squareform(pdf_values)\n cluster_matrix = linkage(pdf_values_1_d, method)\n return cluster_matrix", "def metis_partition(G):\n partition_list = partition(G, 2)[1]\n for i in range(2):\n for username in partition_list[i]:\n G.add_node(username, cluster=i)\n \n return G", "def create_subexperiments(self):\n subexperiments = {}\n for label, df in self.design.groupby(level=0):\n subexperiments[label] = SubExperiment(label, df.loc[label], self.root)\n return 
subexperiments", "def load_expression_cluster_data(self):\n logger.info(\"Loading expression cluster data from file\")\n if self.expression_cluster_anatomy_data is not None:\n self._load_expression_cluster_file(self.expression_cluster_anatomy_cache_path,\n self.expression_cluster_anatomy_url,\n self.expression_cluster_anatomy_data,\n add_to_expression_ontology_annotations=True)\n if self.expression_cluster_molreg_data is not None:\n self._load_expression_cluster_file(self.expression_cluster_molreg_cache_path,\n self.expression_cluster_molreg_url,\n self.expression_cluster_molreg_data)\n if self.expression_cluster_genereg_data is not None:\n self._load_expression_cluster_file(self.expression_cluster_genereg_cache_path,\n self.expression_cluster_genereg_url,\n self.expression_cluster_genereg_data)", "def evaulate_clusters(self, pred_dict, model_dir):\n\t\tclustering_dict = {\"Topic\":[], \"Text\":[], \"Keywords\": []}\n\t\tfor cluster_num, sents_list in pred_dict.items():\n\t\t\tprint(\"\\n cluster number : \", cluster_num)\n\t\t\tprint(\"\\n number of sents : \", len(sents_list))\n\t\t\ttfidf_vec = TfidfVectorizer(use_idf=True, sublinear_tf=True, max_df=0.8, max_features=20, ngram_range=(1,5), min_df=1)\n\t\t\tX_tfidf = tfidf_vec.fit_transform(sents_list).toarray()\n\t\t\ttotal_tfidf = tfidf_vec.get_feature_names()\n\t\t\tfor sent in sents_list:\n\t\t\t\tclustering_dict[\"Topic\"].append(cluster_num)\n\t\t\t\tclustering_dict[\"Text\"].append(sent)\n\t\t\t\tclustering_dict[\"Keywords\"].append(\",\".join(total_tfidf))\n\t\t\"\"\" save the clusters to csv file \"\"\"\n\t\tdf_dominant_topic = defaultdict(list) \n\t\tdf_dominant_topic[\"Topic\"] = clustering_dict[\"Topic\"]\n\t\tdf_dominant_topic[\"Text\"] = clustering_dict[\"Text\"]\n\t\tdf_dominant_topic[\"Keywords\"] = clustering_dict[\"Keywords\"]\n\t\tdf_dominant_topic = pd.DataFrame(df_dominant_topic)\n\t\tdf_dominant_topic.to_csv(os.path.join(model_dir, \"cluster_sentence_topic_mapping.csv\"))\n\t\treturn df_dominant_topic", "def getHierarchies():", "def getHierarchies():", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n 
cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. \n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def cluster(self):\n assert False", "def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)", "def partition(examples):\n\n cluster_examples = [[] for _ in range(0, cluster_count)]\n for example in examples:\n cluster_examples[example.type].append(example)\n\n return cluster_examples", "def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index", "def test_get_hyperflex_node_profile_list(self):\n pass", "def 
cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])", "def evaluate(self):\n\t\tself.peak = np.load(\"peaks_361010.npy\")\n\n\t\tprint(self.peak)\n\t\tpeaks = self.peak.tolist()\n\t\tunique_peaks = []\n\t\tfor item in peaks:\n\t\t\titem_round = ['%.4f' % elm for elm in item]\n\t\t\t\n\t\t\t# =================================================\n\t\t\t# item_round_1 = ['%.2f' % elm for elm in item]\n\t\t\t# item_round = ['%.1f' % float(elm) for elm in item_round_1]\n\t\t\t# =================================================\n\n\t\t\tif item_round not in unique_peaks:\n\t\t\t\tunique_peaks.append(item_round)\n\t\tprint(unique_peaks)\n\t\tself.clusters = len(unique_peaks)\n\t\tprint(\"Number of clusters: \", self.clusters)\n\n\t\tfor i in range(self.num):\n\t\t\tpk = self.peak[i]\n\t\t\tpk_ = ['%.4f' % elm for elm in pk]\n\n\t\t\t# ======================================\n\t\t\t# pk_1 = ['%.2f' % elm for elm in pk]\n\t\t\t# pk_ = ['%.1f' % float(elm) for elm in pk_1]\n\n\t\t\tind = unique_peaks.index(pk_)\n\t\t\tself.pred[i] = ind\n\n\t\t# self.plotPred()\t\t# Visualize clustering result, if the feature of data is 2-dim\n\t\treturn self.pred + 1\t# Making the labels star from '1' rather than '0'", "def _Clustered(self):\n def cluster_func(symbol):\n name = symbol.full_name\n if not name or symbol.IsStringLiteral():\n # min_count=2 will ensure order is maintained while not being grouped.\n # \"&\" to distinguish from real symbol names, id() to ensure uniqueness.\n name = '&' + hex(id(symbol))\n elif name.startswith('*'):\n # \"symbol gap 3\" -> \"symbol gaps\"\n name = re.sub(r'\\s+\\d+( \\(.*\\))?$', 's', name)\n # Never cluster symbols that span multiple paths so that all groups return\n # non-None path information.\n diff_status = None\n if symbol.IsDelta():\n diff_status = symbol.diff_status\n if symbol.object_path or symbol.full_name.startswith('**'):\n return (symbol.object_path, name, diff_status)\n return (symbol.address, name, diff_status)\n\n # Use a custom factory to fill in name & template_name.\n def group_factory(token, symbols):\n full_name = token[1]\n sym = symbols[0]\n if token[1].startswith('*'):\n return self._CreateTransformed(symbols,\n full_name=full_name,\n template_name=full_name,\n name=full_name,\n section_name=sym.section_name)\n return self._CreateTransformed(symbols,\n full_name=full_name,\n template_name=sym.template_name,\n name=sym.name,\n section_name=sym.section_name)\n\n # A full second faster to cluster per-section. Plus, don't need create\n # (section_name, name) tuples in cluster_func.\n ret = []\n for section in self.GroupedByContainerAndSectionName():\n ret.extend(section.GroupedBy(\n cluster_func, min_count=2, group_factory=group_factory))\n\n return self._CreateTransformed(ret)", "def compute_clusters(self, p: float):\n pass" ]
[ "0.5672239", "0.5575119", "0.52612776", "0.51963127", "0.51467055", "0.514348", "0.509445", "0.49672025", "0.49633887", "0.4957409", "0.49469635", "0.49469188", "0.4851559", "0.48466492", "0.48371315", "0.4828702", "0.4827677", "0.4780171", "0.47612554", "0.47612554", "0.47474012", "0.47402045", "0.47401997", "0.4722941", "0.47155294", "0.47011515", "0.46517685", "0.4648736", "0.46427613", "0.46421698" ]
0.56489336
1
Routes related to metrics are delegated to OtterMetrics.
def metrics(self, request): return OtterMetrics(self.store).app.resource()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_metrics(self, from_time=None, to_time=None, metrics=None, view=None):\n return self._get_resource_root().get_metrics(self._path() + '/metrics',\n from_time, to_time, metrics, view)", "def set_metrics(self):", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def AppMetricsView(request): # pylint: disable=invalid-name\n return ExportToDjangoView(request, view=\"app-metrics\")", "def lambda_handler(event, context):\n get_other_metrics(event)", "def route(self):\n pass", "def endpoint_metrics(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/metrics', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'GET')\n return body", "def metrics(self, metrics):\n\n self._metrics = metrics", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def list_metrics(self):\n pass", "async def handle_metrics(request: Request):\n content, http_headers = aioprometheus.render(\n app.registry, [request.headers.get(\"accept\")]\n )\n return Response(content, headers=http_headers)", "def test_metrics(self):\n # Check the route\n self.check_metrics(self.test_metrics_submission_id, False, \"award\")\n self.check_metrics(self.test_metrics_submission_id, True, \"award_financial\")\n self.check_metrics(self.test_metrics_submission_id, True, \"appropriations\")", "def register_metrics(app, app_version=None, app_config=None):\n\n app.before_request(before_request)\n app.after_request(after_request)\n # APP_INFO.info({\"version\": app_version, \"config\": app_config})", "def endpoint_metrics_set(self, endpoint_name=None, metrics=None):\n if metrics is None:\n raise Exception(\"Metrics required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metrics', 'POST', body=metrics)\n else:\n self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'POST', body=metrics)", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def get_metrics(self):\n return None", "def add_routes(self):\n pass", "def decorator(self, decorator: Route.Decorator):\n pass", "def route(cls, url, method='GET'):\n def route_decorator(func):\n item = (url, method, func)\n cls._docoratedRouteHandlers.append(item)\n return func\n return route_decorator", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n 
tf.summary.scalar(metric, metrics[i])", "def _stub_cluster_stats_routes(self):\n self._routes['/stats/cluster/nodes'] = HandlerInfo(\n handler_class=Respond404Handler,\n init_kwargs=dict(reason='Only master node provides cluster stats')\n )\n self._routes['/stats/cluster/processes'] = HandlerInfo(\n handler_class=Respond404Handler,\n init_kwargs=dict(reason='Only master node provides cluster stats')\n )\n self._routes['/stats/cluster/proxies'] = HandlerInfo(\n handler_class=Respond404Handler,\n init_kwargs=dict(reason='Only master node provides cluster stats')\n )", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def metrics_group():", "def _get_metrics(self, *args, **kwargs):\n logger.warning(\"Could not get metric. No function registered.\")", "def get_route_metrics(args):\n db = db_session.get_db_read_replica()\n with db.scoped_session() as session:\n return _get_route_metrics(session, args)", "def test_success_metrics(self):\n @self.graph.route(self.ns.collection_path, Operation.Search, self.ns)\n def foo():\n return \"\"\n\n response = self.client.get(\"api/v1/foo\")\n assert_that(response.status_code, is_(equal_to(200)))\n\n self.graph.metrics.histogram.assert_called_with(\n \"route\",\n ANY,\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n ],\n )\n self.graph.metrics.increment.assert_called_with(\n \"route.call.count\",\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n \"classifier:2xx\",\n ],\n )", "def route(app, requires_login):\n routes = {\n '/kontoplan/<accounting>': kontoplan,\n '/huvudbok/<accounting>': huvudbok,\n '/balansrakning/<accounting>': balance_report,\n '/resultatrakning/<accounting>': income_statement_report,\n '/verifikationslista/<accounting>': verifications,\n '/arsrapport/<accounting>': year_report,\n '/verifikat/<objectid:verification>': print_verification,\n '/vatreport/<objectid:accounting>': vat_report,\n '/periodrapport/<accounting>': period_report,\n '/salesreport/<objectid:toid>': sales_report,\n '/verifikationslista_andrade/<accounting>': verifications_modified,\n '/accountspayable_report/<accounting>': accountspayable_report,\n '/accountspayable_paymentjournal/<accounting>': accountspayable_paymentjournal\n }\n for route, func in routes.items():\n name = func.__name__\n func = requires_login()(func)\n app.add_url_rule(route, name, func, methods=['GET', 'POST'])", "def initialize_prometheus_middleware(app, endpoint='/metrics'):\n if get_setting('DEPLOYED'):\n logger.info(f'Enabling Prometheus endpoint on: \"{endpoint}\"')\n app.add_middleware(PrometheusMiddleware)\n app.add_route(endpoint, metrics)", "def QueueMetricsView(request): # pylint: disable=invalid-name\n return ExportToDjangoView(request, view=\"rq-metrics\")" ]
[ "0.5967554", "0.5963758", "0.5958907", "0.58173203", "0.5702679", "0.5702003", "0.5692182", "0.56049716", "0.55973095", "0.5572481", "0.5559284", "0.5490586", "0.54229575", "0.5382848", "0.53609043", "0.534358", "0.5328718", "0.53084874", "0.5297921", "0.5258105", "0.5254089", "0.5243121", "0.52354586", "0.52351", "0.5230409", "0.520088", "0.51922053", "0.5185882", "0.5179139", "0.51727074" ]
0.65490466
0
Assert that creating an amendment statement with secured party and debtor edits works as expected.
def test_create_amendment_edit(session, client, jwt, description, data, sp_amend_id, debtor_amend_id): json_data = copy.deepcopy(data) if sp_amend_id is not None: json_data['addSecuredParties'][0]['amendPartyId'] = sp_amend_id else: del json_data['addSecuredParties'][0]['amendPartyId'] if debtor_amend_id is not None: json_data['addDebtors'][0]['amendPartyId'] = debtor_amend_id else: del json_data['addDebtors'][0]['amendPartyId'] current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL) current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY) response = client.post('/api/v1/financing-statements/TEST0001/amendments', json=json_data, headers=create_header_account(jwt, [PPR_ROLE, STAFF_ROLE], 'test-user', STAFF_ROLE), content_type='application/json') assert response.status_code == HTTPStatus.CREATED result = response.json if sp_amend_id is None or sp_amend_id > 0: assert result['changes'][0]['addSecuredParties'][0].get('former_name') else: assert 'former_name' not in result['changes'][0]['addSecuredParties'][0] if debtor_amend_id is None or debtor_amend_id > 0: assert result['changes'][0]['addDebtors'][0].get('former_name') else: assert 'former_name' not in result['changes'][0]['addDebtors'][0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_approve_agreement(self):\n pass", "def test_owner_edit_assessment_valid(self):\n req = {'weight': 60, 'additional_description': 'asdfqwer'}\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n response = self.user_01.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['weight'], req['weight'])\n self.assertEqual(response.data['additional_description'], req['additional_description'])", "def test_create_amendment(session, client, jwt, desc, json_data, roles, status, has_account, reg_num):\n headers = None\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n if has_account and BCOL_HELP in roles:\n headers = create_header_account(jwt, roles, 'test-user', BCOL_HELP)\n elif has_account and STAFF_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', STAFF_ROLE)\n elif has_account and GOV_ACCOUNT_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', '1234')\n elif has_account:\n headers = create_header_account(jwt, roles)\n else:\n headers = create_header(jwt, roles)\n\n # test\n response = client.post('/api/v1/financing-statements/' + reg_num + '/amendments',\n json=json_data,\n headers=headers,\n content_type='application/json')\n\n # check\n # print('Response data:')\n # print(response.json)\n assert response.status_code == status", "def test_apply_endorsements(self):", "def test_get_amendment(session, client, jwt, desc, roles, status, has_account, reg_num, base_reg_num):\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n headers = None\n # setup\n if status == HTTPStatus.UNAUTHORIZED and desc.startswith('Report'):\n headers = create_header_account_report(jwt, roles)\n elif has_account and BCOL_HELP in roles:\n headers = create_header_account(jwt, roles, 'test-user', BCOL_HELP)\n elif has_account and STAFF_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', STAFF_ROLE)\n elif has_account and GOV_ACCOUNT_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', GOV_ACCOUNT_ROLE)\n elif has_account:\n headers = create_header_account(jwt, roles)\n else:\n headers = create_header(jwt, roles)\n\n # test\n response = client.get('/api/v1/financing-statements/' + base_reg_num + '/amendments/' + reg_num,\n headers=headers)\n\n # check\n assert response.status_code == status\n # basic verification statement data check\n if status == HTTPStatus.OK:\n json_data = response.json\n assert json_data['amendmentRegistrationNumber'] == reg_num\n assert len(json_data['changes']) >= 1\n assert json_data['changes'][0]['amendmentRegistrationNumber'] == reg_num\n if desc != 'Mismatch registrations staff':\n assert json_data['baseRegistrationNumber'] == base_reg_num\n assert json_data['changes'][0]['baseRegistrationNumber'] == base_reg_num", "def test_owner_edit_assessment(self):\n req, resp = data.assessment_02_request, data.assessment_02_response\n resp['contract'] = self.contract['id']\n\n response = self.user_01.put(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response.data.pop('id')\n assert_assessment_response(self, response, resp)\n\n response = self.user_01.patch(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response.data.pop('id')\n assert_assessment_response(self, response, 
resp)", "def _perform_assertion(self,\n balance_orig: list,\n balance_exp: list,\n account_deltas: dict):\n\n results_annual = cpfhelpers.calc_annual_change(\n self.salary * 12,\n self.bonus,\n self.dob,\n balance_orig[0],\n balance_orig[1],\n balance_orig[2],\n account_deltas=account_deltas,\n date_start=self.date_start)\n\n assert str(round(balance_exp[0], 2)) == results_annual[strings.OA]\n assert str(round(balance_exp[1], 2)) == results_annual[strings.SA]\n assert str(round(balance_exp[2], 2)) == results_annual[strings.MA]", "def test_amendment_success(session, client, jwt):\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n rv1 = create_financing_test(session, client, jwt)\n assert rv1.status_code == HTTPStatus.CREATED\n assert rv1.json['baseRegistrationNumber']\n base_reg_num = rv1.json['baseRegistrationNumber']\n\n json_data = copy.deepcopy(SAMPLE_JSON)\n json_data['baseRegistrationNumber'] = base_reg_num\n json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR'\n json_data['changeType'] = 'AM'\n del json_data['courtOrderInformation']\n del json_data['createDateTime']\n del json_data['amendmentRegistrationNumber']\n del json_data['payment']\n del json_data['removeTrustIndenture']\n del json_data['addTrustIndenture']\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n del json_data['addDebtors']\n del json_data['deleteDebtors']\n del json_data['deleteGeneralCollateral']\n del json_data['addGeneralCollateral']\n del json_data['deleteVehicleCollateral']\n del json_data['documentId']\n json_data['deleteVehicleCollateral'] = rv1.json['vehicleCollateral']\n\n # test\n rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE]),\n content_type='application/json')\n\n # check\n assert rv.status_code == HTTPStatus.CREATED\n json_data = rv.json\n assert 'amendmentRegistrationNumber' in json_data\n assert len(json_data['changes']) >= 1\n assert 'amendmentRegistrationNumber' in json_data['changes'][0]\n assert json_data['baseRegistrationNumber'] == base_reg_num\n assert json_data['changes'][0]['baseRegistrationNumber'] == base_reg_num", "def _perform_assertion(self,\n balance_orig: list,\n balance_exp: list,\n dob: str):\n\n results_annual = cpfhelpers.calc_annual_change(\n self.salary * 12,\n self.bonus,\n dob,\n balance_orig[0],\n balance_orig[1],\n balance_orig[2],\n account_deltas={},\n date_start=self.date_start)\n\n assert str(round(balance_exp[0], 2)) == results_annual[strings.OA]\n assert str(round(balance_exp[1], 2)) == results_annual[strings.SA]\n assert str(round(balance_exp[2], 2)) == results_annual[strings.MA]", "def test_other_users_edit_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.user_02.put(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.put(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.put(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.user_02.patch(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.patch(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.patch(self.assessment_list_url, req)\n 
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_other_users_edit_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.user_02.put(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.put(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.put(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.user_02.patch(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.patch(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.patch(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_change_provisioned_throughput_usual_case():", "def test_create_amendment_staff(session, client, jwt, role, routing_slip, bcol_number, dat_number, status):\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n json_data = copy.deepcopy(STATEMENT_VALID)\n params = ''\n if routing_slip:\n params = '?routingSlipNumber=' + str(routing_slip)\n elif bcol_number:\n params = '?bcolAccountNumber=' + str(bcol_number)\n if dat_number:\n params += '&datNumber=' + str(dat_number)\n print('params=' + params)\n\n # test\n response = client.post('/api/v1/financing-statements/TEST0001/amendments' + params,\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE, role], 'test-user', role),\n content_type='application/json')\n\n # check\n assert response.status_code == status\n if response.status_code == HTTPStatus.CREATED:\n reg_num = response.json['amendmentRegistrationNumber']\n registration: Registration = Registration.find_by_registration_number(reg_num, 'PS12345', True)\n assert registration.verification_report", "def test_reject_agreement(self):\n pass", "def _perform_assertion(self,\n balance_orig: list,\n balance_exp: list):\n\n results_annual = cpfhelpers.calc_annual_change(\n self.salary * 12,\n self.bonus,\n self.dob,\n balance_orig[0],\n balance_orig[1],\n balance_orig[2],\n account_deltas={},\n date_start=self.date_start)\n\n assert str(round(balance_exp[0], 2)) == results_annual[strings.OA]\n assert str(round(balance_exp[1], 2)) == results_annual[strings.SA]\n assert str(round(balance_exp[2], 2)) == results_annual[strings.MA]", "def test_leave_accrual_access_rights(self):\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n accrual.write({\n 'line_ids': [(0, 0, {\n 'name': 'Test',\n 'amount_cash': 100,\n 'date': datetime.now(),\n })],\n })\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_3.id).check_access_rule, 'read')\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_2.id).check_access_rights, 'write')\n\n accrual.sudo(self.user_1.id).check_access_rule('read')\n self.assertTrue(\n accrual.sudo(self.user_1.id).check_access_rights('read'))\n\n # The manager can not access the leave accruals of the employee 2\n # because he is not the employee's manager\n accrual_2 = self.employee_2.get_leave_accrual(self.leave_type.id)\n\n self.assertRaises(\n Exception,\n accrual_2.sudo(self.user_1.id).check_access_rule, 'read')\n\n self.user_1.write({\n 'groups_id': [(4, self.ref('base.group_hr_manager'))]})\n\n for operation in ['read', 'write', 'create', 'unlink']:\n 
accrual_2.sudo(self.user_1.id).check_access_rule(operation)\n self.assertTrue(\n accrual_2.sudo(self.user_1.id).check_access_rights(operation))", "def test_create_warranty(self):\n pass", "def test_edition_of_other_users_aid(client, contributor):\n\n aid = AidFactory()\n form_url = reverse('aid_edit_view', args=[aid.slug])\n client.force_login(contributor)\n res = client.get(form_url)\n assert res.status_code == 404", "def test_only_owner_change_change_policy(crowdsale, customer, signer_address):\n\n with pytest.raises(TransactionFailed):\n crowdsale.transact({\"from\": customer}).setRequireSignedAddress(True, signer_address)", "def test_evidence_change_assmt(self):\n with factories.single_commit():\n evidence_url = \"test.com\"\n evidence_file = \"test_gdrive.file\"\n evidence_1 = factories.EvidenceUrlFactory(link=evidence_url,\n title=evidence_url)\n evidence_2 = factories.EvidenceFileFactory(link=evidence_file,\n title=evidence_file)\n response = self.api.put(self.assessment, {\n \"actions\": {\"add_related\": [\n {\n \"id\": evidence_1.id,\n \"type\": \"Evidence\",\n },\n {\n \"id\": evidence_2.id,\n \"type\": \"Evidence\",\n },\n ]}\n })\n self.assert200(response)\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"[email protected]\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"EVIDENCE URL\"],\n (evidence_url, \"\")\n )\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"EVIDENCE FILE\"],\n (evidence_file, \"\")\n )", "def test_alteration_process(app, session, orig_legal_type, new_legal_type):\n # setup\n identifier = 'BC1234567'\n business = create_business(identifier)\n business.legal_type = orig_legal_type\n\n alteration_filing = copy.deepcopy(FILING_HEADER)\n alteration_filing['filing']['business']['legalType'] = orig_legal_type\n alteration_filing['filing']['alteration'] = copy.deepcopy(ALTERATION)\n alteration_filing['filing']['alteration']['business']['legalType'] = new_legal_type\n payment_id = str(random.SystemRandom().getrandbits(0x58))\n filing_submission = create_filing(payment_id, alteration_filing, business_id=business.id)\n\n # test\n alteration.process(business=business,\n filing_submission=filing_submission,\n filing=alteration_filing['filing'])\n\n # validate\n assert business.legal_type == new_legal_type", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def test_client_bank_account_update(self):\n pass", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "def test_superuser_edit_assessment(self):\n req, resp = data.assessment_02_request, data.assessment_02_response\n resp['contract'] = self.contract['id']\n\n response = self.superuser.put(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_submit_for_endorsement(self):", "def test_kyc_post_legal_board_member(self):\n pass", "def test_kyc_post_legal(self):\n pass", "def 
test_client_risk_assessment_partial_update(self):\n pass" ]
[ "0.66511834", "0.6303875", "0.630162", "0.6266475", "0.6166075", "0.6130697", "0.6124604", "0.6123696", "0.6073379", "0.60479134", "0.60479134", "0.6042392", "0.6023422", "0.6012972", "0.6002221", "0.59952265", "0.5974059", "0.5972718", "0.59536445", "0.5933681", "0.5923553", "0.58931744", "0.5877792", "0.5877421", "0.58725786", "0.5870533", "0.58620816", "0.5856005", "0.5840853", "0.58130324" ]
0.7524552
0
Assert that a GET of an amendment registration statement works as expected.
def test_get_amendment(session, client, jwt, desc, roles, status, has_account, reg_num, base_reg_num): current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY) headers = None # setup if status == HTTPStatus.UNAUTHORIZED and desc.startswith('Report'): headers = create_header_account_report(jwt, roles) elif has_account and BCOL_HELP in roles: headers = create_header_account(jwt, roles, 'test-user', BCOL_HELP) elif has_account and STAFF_ROLE in roles: headers = create_header_account(jwt, roles, 'test-user', STAFF_ROLE) elif has_account and GOV_ACCOUNT_ROLE in roles: headers = create_header_account(jwt, roles, 'test-user', GOV_ACCOUNT_ROLE) elif has_account: headers = create_header_account(jwt, roles) else: headers = create_header(jwt, roles) # test response = client.get('/api/v1/financing-statements/' + base_reg_num + '/amendments/' + reg_num, headers=headers) # check assert response.status_code == status # basic verification statement data check if status == HTTPStatus.OK: json_data = response.json assert json_data['amendmentRegistrationNumber'] == reg_num assert len(json_data['changes']) >= 1 assert json_data['changes'][0]['amendmentRegistrationNumber'] == reg_num if desc != 'Mismatch registrations staff': assert json_data['baseRegistrationNumber'] == base_reg_num assert json_data['changes'][0]['baseRegistrationNumber'] == base_reg_num
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_amendment_edit(session, client, jwt, description, data, sp_amend_id, debtor_amend_id):\n json_data = copy.deepcopy(data)\n if sp_amend_id is not None:\n json_data['addSecuredParties'][0]['amendPartyId'] = sp_amend_id\n else:\n del json_data['addSecuredParties'][0]['amendPartyId']\n if debtor_amend_id is not None:\n json_data['addDebtors'][0]['amendPartyId'] = debtor_amend_id\n else:\n del json_data['addDebtors'][0]['amendPartyId']\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n\n response = client.post('/api/v1/financing-statements/TEST0001/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE, STAFF_ROLE], 'test-user', STAFF_ROLE),\n content_type='application/json')\n assert response.status_code == HTTPStatus.CREATED\n result = response.json\n if sp_amend_id is None or sp_amend_id > 0:\n assert result['changes'][0]['addSecuredParties'][0].get('former_name')\n else:\n assert 'former_name' not in result['changes'][0]['addSecuredParties'][0]\n if debtor_amend_id is None or debtor_amend_id > 0:\n assert result['changes'][0]['addDebtors'][0].get('former_name')\n else:\n assert 'former_name' not in result['changes'][0]['addDebtors'][0]", "def test_create_amendment(session, client, jwt, desc, json_data, roles, status, has_account, reg_num):\n headers = None\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n if has_account and BCOL_HELP in roles:\n headers = create_header_account(jwt, roles, 'test-user', BCOL_HELP)\n elif has_account and STAFF_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', STAFF_ROLE)\n elif has_account and GOV_ACCOUNT_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', '1234')\n elif has_account:\n headers = create_header_account(jwt, roles)\n else:\n headers = create_header(jwt, roles)\n\n # test\n response = client.post('/api/v1/financing-statements/' + reg_num + '/amendments',\n json=json_data,\n headers=headers,\n content_type='application/json')\n\n # check\n # print('Response data:')\n # print(response.json)\n assert response.status_code == status", "def test_amendment_success(session, client, jwt):\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n rv1 = create_financing_test(session, client, jwt)\n assert rv1.status_code == HTTPStatus.CREATED\n assert rv1.json['baseRegistrationNumber']\n base_reg_num = rv1.json['baseRegistrationNumber']\n\n json_data = copy.deepcopy(SAMPLE_JSON)\n json_data['baseRegistrationNumber'] = base_reg_num\n json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR'\n json_data['changeType'] = 'AM'\n del json_data['courtOrderInformation']\n del json_data['createDateTime']\n del json_data['amendmentRegistrationNumber']\n del json_data['payment']\n del json_data['removeTrustIndenture']\n del json_data['addTrustIndenture']\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n del json_data['addDebtors']\n del json_data['deleteDebtors']\n del json_data['deleteGeneralCollateral']\n del json_data['addGeneralCollateral']\n del json_data['deleteVehicleCollateral']\n del json_data['documentId']\n json_data['deleteVehicleCollateral'] = rv1.json['vehicleCollateral']\n\n # test\n rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE]),\n content_type='application/json')\n\n # check\n 
assert rv.status_code == HTTPStatus.CREATED\n json_data = rv.json\n assert 'amendmentRegistrationNumber' in json_data\n assert len(json_data['changes']) >= 1\n assert 'amendmentRegistrationNumber' in json_data['changes'][0]\n assert json_data['baseRegistrationNumber'] == base_reg_num\n assert json_data['changes'][0]['baseRegistrationNumber'] == base_reg_num", "def test_registration_modified_active(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n grant_access([registration], dummy_regform, email_body='body', email_subject='subject')\n # grant_access will already contact ADAMS\n assert api_post.call_count == 1\n\n modify_registration(registration, {\n 'first_name': 'Conan',\n 'last_name': 'Osiris',\n 'email': '[email protected]'\n })\n api_delete.call_count == 0\n assert api_post.call_count == 2\n api_post.assert_called_with(dummy_regform.event, [registration], update=True)", "def test_registration_modified_active(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n grant_access([registration], dummy_regform, email_body='body', email_subject='subject')\n # grant_access will already contact ADAMS\n assert api_post.call_count == 1\n\n modify_registration(registration, {\n 'first_name': 'Conan',\n 'last_name': 'Osiris',\n 'email': '[email protected]'\n })\n api_delete.call_count == 0\n assert api_post.call_count == 2\n api_post.assert_called_with(dummy_regform.event, [registration], update=True)", "def test_get_agreement(self):\n pass", "def test_meeting_registrants(self):\n pass", "def test_meeting_registrant_status(self):\n pass", "def test_approve_agreement(self):\n pass", "def test_add_admin_to_org(self):\n pass", "def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')", "def test_meeting_registrant_question_update(self):\n pass", "def test_confirm_customization_details(self):\n pass", "def test_amendment_court_order_success(session, client, jwt):\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n rv1 = create_financing_test(session, client, jwt)\n assert rv1.status_code == HTTPStatus.CREATED\n assert rv1.json['baseRegistrationNumber']\n base_reg_num = rv1.json['baseRegistrationNumber']\n\n json_data = copy.deepcopy(SAMPLE_JSON)\n json_data['baseRegistrationNumber'] = base_reg_num\n json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR'\n json_data['changeType'] = 'CO'\n del json_data['createDateTime']\n del json_data['amendmentRegistrationNumber']\n del json_data['payment']\n del json_data['removeTrustIndenture']\n del json_data['addTrustIndenture']\n del json_data['deleteDebtors']\n del json_data['documentId']\n json_data['deleteDebtors'] = rv1.json['debtors']\n del json_data['deleteSecuredParties']\n json_data['deleteSecuredParties'] = rv1.json['securedParties']\n del json_data['deleteGeneralCollateral']\n json_data['deleteGeneralCollateral'] = rv1.json['generalCollateral']\n del json_data['deleteVehicleCollateral']\n json_data['deleteVehicleCollateral'] = rv1.json['vehicleCollateral']\n# print(json_data)\n\n # test\n rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE]),\n content_type='application/json')\n\n # check\n# print(rv.json)\n assert rv.status_code == HTTPStatus.CREATED\n json_data = rv.json\n assert 'amendmentRegistrationNumber' in json_data\n assert len(json_data['changes']) >= 1\n assert 'amendmentRegistrationNumber' in json_data['changes'][0]\n 
assert json_data['baseRegistrationNumber'] == base_reg_num\n assert json_data['changes'][0]['baseRegistrationNumber'] == base_reg_num", "def test_get_marketplace_activation_status(self):\n pass", "def test_installments_get(self):\n pass", "def test_submit_for_endorsement(self):", "def test_meeting_registrants_questions_get(self):\n pass", "def test_apply_endorsements(self):", "def test_meeting_registrant_create(self):\n pass", "def test_contribute(self):\n pass", "def test_attend_check_has_the_registration(self):\n self.user = User.objects.create_user('[email protected]')\n self.venue = Venue.objects.create(name='Seoul City Hall', latitude=37.566676, longitude=126.978397)\n self.meet_up = MeetUp.objects.create(title='Python User Group Bimonthly Seminar', venue=self.venue)\n self.ticket = Ticket.objects.create(title='Normal Ticket', meet_up=self.meet_up, charge=10000)\n self.registration = Registration.objects.create(user=self.user, ticket=self.ticket)\n\n self.attend_check = AttendCheck.objects.create(registration=self.registration)\n\n self.assertIsNotNone(self.attend_check.registration)", "def test_find_by_change_registration_id(session, id, has_results):\n parties = MhrParty.find_by_change_registration_id(id)\n if has_results:\n assert parties\n assert len(parties) >= 1\n assert parties[0].party_type == MhrPartyTypes.SUBMITTING\n else:\n assert not parties", "def test_create_confirm_delivery_details(self):\n pass", "def test_meeting_invitation(self):\n pass", "def test_create_amendment_staff(session, client, jwt, role, routing_slip, bcol_number, dat_number, status):\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n json_data = copy.deepcopy(STATEMENT_VALID)\n params = ''\n if routing_slip:\n params = '?routingSlipNumber=' + str(routing_slip)\n elif bcol_number:\n params = '?bcolAccountNumber=' + str(bcol_number)\n if dat_number:\n params += '&datNumber=' + str(dat_number)\n print('params=' + params)\n\n # test\n response = client.post('/api/v1/financing-statements/TEST0001/amendments' + params,\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE, role], 'test-user', role),\n content_type='application/json')\n\n # check\n assert response.status_code == status\n if response.status_code == HTTPStatus.CREATED:\n reg_num = response.json['amendmentRegistrationNumber']\n registration: Registration = Registration.find_by_registration_number(reg_num, 'PS12345', True)\n assert registration.verification_report", "def test_forgotten_password(forgot_link):\n print(forgot_link)\n assert \"forgotten\" in forgot_link", "def test_request_project_invitation(self):\n pass", "def test_verify_that_you_can_create_a_new_group():", "def test_update_activity(self):\n pass" ]
[ "0.64464396", "0.63845927", "0.63768023", "0.62937075", "0.62937075", "0.605112", "0.60243297", "0.59206325", "0.5902402", "0.5859729", "0.58191997", "0.5813629", "0.58083546", "0.5763737", "0.5750557", "0.5737895", "0.57193136", "0.5703327", "0.56942743", "0.5671445", "0.56685334", "0.56656504", "0.56539375", "0.5649675", "0.56472445", "0.56032866", "0.5582843", "0.5582409", "0.55690867", "0.55675644" ]
0.7382489
0
Assert that setting the amendment change type from the amendment data works as expected.
def test_change_types(session, client, jwt, change_type, is_general_collateral): current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL) json_data = copy.deepcopy(AMENDMENT_STATEMENT) json_data['changeType'] = change_type json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR' del json_data['createDateTime'] del json_data['payment'] del json_data['documentId'] del json_data['amendmentRegistrationNumber'] del json_data['courtOrderInformation'] del json_data['addTrustIndenture'] del json_data['removeTrustIndenture'] if change_type in (model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL, model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL, model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE): del json_data['addSecuredParties'] del json_data['deleteSecuredParties'] del json_data['addDebtors'] del json_data['deleteDebtors'] if change_type == model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE: del json_data['addVehicleCollateral'] del json_data['addGeneralCollateral'] del json_data['deleteGeneralCollateral'] elif change_type == model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL: del json_data['deleteVehicleCollateral'] del json_data['deleteGeneralCollateral'] if is_general_collateral: del json_data['addVehicleCollateral'] else: del json_data['addGeneralCollateral'] elif change_type == model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL: if is_general_collateral: del json_data['addVehicleCollateral'] del json_data['deleteVehicleCollateral'] else: del json_data['addGeneralCollateral'] del json_data['deleteGeneralCollateral'] if change_type in (model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE, model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER, model_utils.REG_TYPE_AMEND_SP_TRANSFER): del json_data['addVehicleCollateral'] del json_data['deleteVehicleCollateral'] del json_data['addGeneralCollateral'] del json_data['deleteGeneralCollateral'] if change_type == model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE: del json_data['addSecuredParties'] del json_data['deleteSecuredParties'] del json_data['addDebtors'] elif change_type == model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER: del json_data['addSecuredParties'] del json_data['deleteSecuredParties'] elif change_type == model_utils.REG_TYPE_AMEND_SP_TRANSFER: del json_data['addDebtors'] del json_data['deleteDebtors'] base_reg_num = 'TEST0001' json_data['baseRegistrationNumber'] = base_reg_num # Set well known ids for deletes if 'deleteDebtors' in json_data: json_data['deleteDebtors'][0]['partyId'] = 200000024 if 'deleteSecuredParties' in json_data: json_data['deleteSecuredParties'][0]['partyId'] = 200000026 if 'deleteGeneralCollateral' in json_data: json_data['deleteGeneralCollateral'][0]['collateraId'] = 200000000 if 'deleteVehicleCollateral' in json_data: json_data['deleteVehicleCollateral'][0]['vehicleId'] = 200000008 rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments', json=json_data, headers=create_header_account(jwt, [PPR_ROLE]), content_type='application/json') # check # print(rv.json) assert rv.status_code == HTTPStatus.CREATED assert 'amendmentRegistrationNumber' in rv.json
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_change_asset_type_assignment_rule(self):\n pass", "def test_change_relation_type(self):\n pass", "def test_update_data_type_of_attrvalue(self):\n user = User.objects.create(username=\"hoge\")\n\n entity = Entity.objects.create(name=\"entity\", created_user=user)\n entity.attrs.add(\n EntityAttr.objects.create(\n **{\n \"name\": \"attr\",\n \"type\": AttrTypeValue[\"string\"],\n \"created_user\": user,\n \"parent_entity\": entity,\n }\n )\n )\n\n entry = Entry.objects.create(name=\"entry\", schema=entity, created_user=user)\n entry.complement_attrs(user)\n\n attrv = entry.attrs.first().add_value(user, \"hoge\")\n\n # vanish data_type of initial AttributeValue instance\n attrv.data_type = 0\n attrv.save()\n\n # this processing complements data_type parameter of latest AttributeValue\n # as the current type of Attribute instance\n results = entry.get_available_attrs(self._user)\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0][\"last_value\"], \"\")\n self.assertEqual(AttributeValue.objects.get(id=attrv.id).data_type, AttrTypeValue[\"string\"])", "def test_alteration_process(app, session, orig_legal_type, new_legal_type):\n # setup\n identifier = 'BC1234567'\n business = create_business(identifier)\n business.legal_type = orig_legal_type\n\n alteration_filing = copy.deepcopy(FILING_HEADER)\n alteration_filing['filing']['business']['legalType'] = orig_legal_type\n alteration_filing['filing']['alteration'] = copy.deepcopy(ALTERATION)\n alteration_filing['filing']['alteration']['business']['legalType'] = new_legal_type\n payment_id = str(random.SystemRandom().getrandbits(0x58))\n filing_submission = create_filing(payment_id, alteration_filing, business_id=business.id)\n\n # test\n alteration.process(business=business,\n filing_submission=filing_submission,\n filing=alteration_filing['filing'])\n\n # validate\n assert business.legal_type == new_legal_type", "def test_change_relation_types(self):\n pass", "def test_ticket_type_change_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type change defect bug')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_investigation_type_setter(self):\n pt = PrepTemplate.create(self.metadata, self.new_raw_data,\n self.test_study, self.data_type_id)\n self.assertEqual(pt.investigation_type, None)\n pt.investigation_type = \"Other\"\n self.assertEqual(pt.investigation_type, 'Other')\n with self.assertRaises(QiitaDBColumnError):\n pt.investigation_type = \"should fail\"", "def test_getTypeName(self):\n self.assertEquals(ChangeType().getTypeName(),\n 'test.Change')", "def test_change_domain_type_assignment_rule(self):\n pass", "def test_update_attribute_data(self):\n pass", "def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_change_workflow_definition(self):\n pass", "def test_create_amendment_edit(session, client, jwt, description, data, sp_amend_id, debtor_amend_id):\n json_data = copy.deepcopy(data)\n if sp_amend_id is not None:\n json_data['addSecuredParties'][0]['amendPartyId'] = sp_amend_id\n else:\n del json_data['addSecuredParties'][0]['amendPartyId']\n if debtor_amend_id is not None:\n json_data['addDebtors'][0]['amendPartyId'] = debtor_amend_id\n else:\n del 
json_data['addDebtors'][0]['amendPartyId']\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n\n response = client.post('/api/v1/financing-statements/TEST0001/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE, STAFF_ROLE], 'test-user', STAFF_ROLE),\n content_type='application/json')\n assert response.status_code == HTTPStatus.CREATED\n result = response.json\n if sp_amend_id is None or sp_amend_id > 0:\n assert result['changes'][0]['addSecuredParties'][0].get('former_name')\n else:\n assert 'former_name' not in result['changes'][0]['addSecuredParties'][0]\n if debtor_amend_id is None or debtor_amend_id > 0:\n assert result['changes'][0]['addDebtors'][0].get('former_name')\n else:\n assert 'former_name' not in result['changes'][0]['addDebtors'][0]", "def test_parse_changesets_emails(self):\n \n self.assertEqual(bitchangesets.parse_changeset(self.changeset), {'timestamp': '2013-07-27 01:56:46', 'parsed_author': 'David Leonard'})", "def test_change_attribute(self):\n program = factories.ProgramFactory(title=\"Test program\")\n\n resp, review = generate_review_object(\n program, state=all_models.Review.STATES.REVIEWED)\n program_id = program.id\n self.assertEqual(201, resp.status_code)\n import_data = OrderedDict(\n [\n (\"object_type\", \"Program\"),\n (\"Code*\", program.slug),\n (\"Title*\", \"Brand new TiTle\"),\n (\"Program Managers\", \"[email protected]\"),\n ]\n )\n response = self.import_data(import_data)\n self._check_csv_response(response, {})\n program = all_models.Program.query.get(program_id)\n self.assertEqual(\n all_models.Review.STATES.UNREVIEWED, program.review_status\n )\n notification = all_models.Notification.query.filter_by(\n object_id=review.id, object_type=\"Review\"\n ).one()\n self.assertTrue(notification)", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def testTheType(self, theTestType):\n \n pass", "def test_add_asset_type_assignment_rule(self):\n pass", "def test_mutate(self, change: Statement) -> None:\n self.assertThat(\n statement_mutates(change.statement()),\n Equals(True),\n )", "def test_issue_edit_issue(self):\n pass", "def test_updated_type(self):\n\n base_model = BaseModel()\n self.assertTrue(base_model.updated_at, datetime.datetime)", "def test_give_correct_change(self):\n item, change, _ = give_item_and_change('coke', 1)\n self.assertEqual(item, 'coke')\n self.assertEqual(change, [.20, .05, .02])", "def testExerciseActivityChange3m(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"activity_change_3m\")\n\n self.util.stringPropertyTest(self, attr, \"activity_change_3m\")", "def testType(self):\n def setType():\n self.node.type = 'banana'\n\n self.assertRaises(\n ValueError,\n setType\n )\n\n self.assertEqual(\n 'ccc',\n self.node.type\n )\n\n self.node._type = 'cdl'\n\n self.assertEqual(\n 'cdl',\n self.node.type\n )\n\n self.node.type = 'ccc'\n\n self.assertEqual(\n 'ccc',\n self.node.type\n )", "def test_patchorganizations_item(self):\n pass", "def test_patch_project_type_change(self):\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'type': PROJECT_TYPE_CATEGORY}\n response = self.request_knox(url, 
method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_patch_record(self):\n pass", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")", "def test_post_change_access_type_as_owner(self):\n assert self.testalbum.accesstype != ALBUM_GROUPS\n self.make_logged_in_owner()\n\n # get our manage page with form\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n\n # get and populate form\n myform = resp.context['accesstypeform']\n data = myform.initial\n data['mytype'] = ALBUM_GROUPS\n\n # construct our post\n self.updateaccesspostrequest = self.factory.post(\n reverse(\"update_album_access\", kwargs={\"id\": self.testalbum.id}), data=data)\n self.updateaccesspostrequest.user = self.u\n\n # change access type\n resp = album.update_access_type(self.updateaccesspostrequest, self.testalbum.id)\n assert resp.status_code == 302\n self.testalbum.refresh_from_db()\n assert self.testalbum.accesstype == ALBUM_GROUPS", "def test_update_activity(self):\n pass" ]
[ "0.6533172", "0.6207626", "0.6028736", "0.6021598", "0.6020911", "0.5930699", "0.5922061", "0.59145033", "0.59051156", "0.5879773", "0.58577573", "0.5824438", "0.58031595", "0.5800085", "0.57931745", "0.5766806", "0.5757334", "0.574065", "0.56755495", "0.5674831", "0.5652479", "0.56494826", "0.56211185", "0.5607357", "0.55797523", "0.5555276", "0.55342907", "0.5530089", "0.5497948", "0.54951835" ]
0.66191286
0
Create a financing statement for testing.
def create_financing_test(session, client, jwt): statement = copy.deepcopy(FINANCING_STATEMENT) statement['debtors'][0]['businessName'] = 'TEST BUS 2 DEBTOR' statement['type'] = 'SA' del statement['createDateTime'] del statement['baseRegistrationNumber'] del statement['payment'] del statement['documentId'] del statement['lifeInfinite'] del statement['lienAmount'] del statement['surrenderDate'] current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL) return client.post('/api/v1/financing-statements', json=statement, headers=create_header_account(jwt, [PPR_ROLE]), content_type='application/json')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doctest_DKBCCCsvStatementParser():", "def test_transaction_management_statements(self):\n for script_pattern in (\n \"BEGIN TRANSACTION; %s; COMMIT;\",\n \"BEGIN; %s; END TRANSACTION;\",\n \"/* comment */BEGIN TRANSACTION; %s; /* comment */COMMIT;\",\n \"/* comment */ BEGIN TRANSACTION; %s; /* comment */ COMMIT;\",\n \"\"\"\n-- comment\nBEGIN TRANSACTION;\n\n%s;\n\n-- comment\nCOMMIT;\"\"\",\n ):\n\n test_statement = (\"CREATE TABLE TEST1 (field1 int); \"\n \"DROP TABLE TEST1\")\n script = script_pattern % test_statement\n src = self.tmp()\n\n with open(src, 'wt') as f:\n f.write(script)\n\n sqls = SqlScript(src)\n sqls.run(self.engine)", "def construct_statement(*args):\n\n INPUT_STATEMENT = \"\"\n for statement in args:\n INPUT_STATEMENT += statement\n \n\n return INPUT_STATEMENT", "def from_statement(cls, statement):\r\n return cls('\\n'.join(textwrap.dedent(statement).splitlines()[1:]))", "def test_sql_injection(self):\n\n self.create_job('Tejas', '[email protected]', '1;DROP TABLE Job', 'here', 'description', 1)\n comment = Comment.query.filter_by(compensation=\"1;DROP TABLE Job\").first()\n self.assertTrue(comment)", "def test_no_implicit_returning_clause(self):\n\n MockTable = self.classes.MockTable\n ins = MockTable.__table__.insert().values(test=5).compile()\n expected = str(ins)\n assert expected == 'INSERT INTO test_schema.mocktable (test) VALUES (:test)'", "def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)", "def stmts_to_stmt(statements):\n if len(statements) == 1:\n return statements[0]\n array = FakeArray(statements, arr_type=pr.Array.NOARRAY)\n return FakeStatement([array])", "def create_table_statements() -> [str]:\n pass", "def insert_statement() -> str:\n pass", "def Generate(self):\n clauses = [self.main_clause] + self.use_clauses + self.join_clauses\n if self.where_conds:\n if self.or_where_conds:\n clauses.append('WHERE ' + '\\n OR '.join(self.where_conds))\n else:\n clauses.append('WHERE ' + '\\n AND '.join(self.where_conds))\n if self.group_by_terms:\n clauses.append('GROUP BY ' + ', '.join(self.group_by_terms))\n if self.having_conds:\n assert self.group_by_terms\n clauses.append('HAVING %s' % ','.join(self.having_conds))\n if self.order_by_terms:\n clauses.append('ORDER BY ' + ', '.join(self.order_by_terms))\n\n if self.limit and self.offset:\n clauses.append('LIMIT %d OFFSET %d' % (self.limit, self.offset))\n elif self.limit:\n clauses.append('LIMIT %d' % self.limit)\n elif self.offset:\n clauses.append('LIMIT %d OFFSET %d' % (sys.maxint, self.offset))\n\n if self.insert_args:\n clauses.append('VALUES (' + PlaceHolders(self.insert_args[0]) + ')')\n args = self.insert_args\n if self.duplicate_update_cols:\n clauses.append('ON DUPLICATE KEY UPDATE %s' % (\n ', '.join(['%s=VALUES(%s)' % (col, col)\n for col in self.duplicate_update_cols])))\n assert not (self.join_args + self.update_args + self.where_args +\n self.group_by_args + self.order_by_args + self.having_args)\n else:\n args = (self.join_args + self.update_args + self.where_args +\n self.group_by_args + self.having_args + self.order_by_args)\n assert not (self.insert_args + self.duplicate_update_cols)\n\n args = _BoolsToInts(args)\n stmt_str = '\\n'.join(clause for clause in clauses if clause)\n\n assert _IsValidStatement(stmt_str), stmt_str\n return stmt_str, args", "def _build_test_query01(self):\n\n\t\tquery = 'select \"GLOBALEVENTID\", \"SQLDATE\", \"MonthYear\", \"Year\" ' + \\\n\t\t\t\t\t 
'from \"DEMOUSER00\".\"uni.vlba.gdelt.data::gdelt_dailyupdates\"'\t\t\n\n\t\treturn query", "def test_generate_f_file_queries_contracts(database, monkeypatch):\n sess = database.session\n sess.query(Subaward).delete(synchronize_session=False)\n sess.commit()\n\n parent_duns, duns, dom_country, int_country = reference_data(sess)\n\n # Setup - create awards, procurements, subcontract\n sub = SubmissionFactory(submission_id=1)\n d1_awd = DetachedAwardProcurementFactory(\n submission_id=sub.submission_id,\n idv_type=None,\n unique_award_key='AWD'\n )\n contract_awd = FSRSProcurementFactory(\n contract_number=d1_awd.piid,\n idv_reference_number=d1_awd.parent_award_id,\n contracting_office_aid=d1_awd.awarding_sub_tier_agency_c,\n company_address_country=dom_country.country_code,\n principle_place_country=int_country.country_code,\n duns=duns.awardee_or_recipient_uniqu,\n date_signed=datetime.now(),\n date_submitted=datetime(2019, 5, 30, 16, 25, 12, 34)\n )\n sub_contract_awd = FSRSSubcontractFactory(\n parent=contract_awd,\n company_address_country=int_country.country_code,\n principle_place_country=dom_country.country_code,\n subcontract_date=datetime.now()\n )\n d1_idv = DetachedAwardProcurementFactory(\n submission_id=sub.submission_id,\n idv_type='C',\n unique_award_key='IDV'\n )\n contract_idv = FSRSProcurementFactory(\n contract_number=d1_idv.piid,\n idv_reference_number=d1_idv.parent_award_id,\n contracting_office_aid=d1_idv.awarding_sub_tier_agency_c,\n company_address_country=dom_country.country_code,\n principle_place_country=int_country.country_code,\n duns=duns.awardee_or_recipient_uniqu,\n date_signed=datetime.now(),\n date_submitted=datetime(2019, 5, 30, 16, 25, 12, 34)\n )\n sub_contract_idv = FSRSSubcontractFactory(\n parent=contract_idv,\n company_address_country=int_country.country_code,\n principle_place_country=dom_country.country_code,\n subcontract_date=datetime.now()\n )\n\n sess.add_all([sub, d1_awd, contract_awd, sub_contract_awd, d1_idv, contract_idv, sub_contract_idv])\n sess.commit()\n\n # Gather the sql\n populate_subaward_table(sess, 'procurement_service', ids=[contract_awd.id, contract_idv.id])\n\n # Get the records\n contracts_results = sess.query(Subaward).order_by(Subaward.unique_award_key).all()\n\n created_at = updated_at = contracts_results[0].created_at\n\n # Expected Results\n assert compare_contract_results(contracts_results[0], d1_awd, contract_awd, sub_contract_awd, parent_duns, duns,\n dom_country, int_country, created_at, updated_at) is True\n assert compare_contract_results(contracts_results[1], d1_idv, contract_idv, sub_contract_idv, parent_duns, duns,\n dom_country, int_country, created_at, updated_at) is True", "def test_statement_initialized_with_just_one_field():\n shap = Statement(prop_id=\"dcterms:creator\")\n assert not shap.start\n assert shap.shape_id is None\n assert shap.shape_label is None\n assert shap.prop_id == \"dcterms:creator\"\n assert shap.prop_label is None\n assert shap.mand is None\n assert shap.repeat is None\n assert shap.value_type is None\n assert shap.value_datatype is None\n assert shap.constraint_value is None\n assert shap.constraint_type is None\n assert shap.shape_ref is None\n assert shap.annot is None", "def test_sql(cursor):\n np = parser(get_test_file(\"NLDN/example.bin\", fponly=True))\n np.sql(cursor)", "def build_statement(\n user: AbstractUser,\n verb: Verb,\n obj: Activity,\n context: Context,\n statement_id: Optional[uuid.UUID] = None,\n) -> Optional[Statement]:\n timestamp = timezone.now().isoformat()\n 
actor = _get_actor_from_user(user)\n if statement_id is None:\n statement_id = uuid.uuid4()\n if actor is None:\n logger.warning(\"Unable to get an XAPI actor definition for user %s\", user.id)\n return None\n return Statement(\n actor=actor,\n context=context,\n id=statement_id,\n object=obj,\n timestamp=timestamp,\n verb=verb,\n )", "def lab7_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def test_sql_methods():\n\n err = _do_test_raw(\"\"\"\n x.executeSimpleSQL(\"foo \" + y);\n \"\"\")\n assert err.warnings[0]['id'][-1] == 'executeSimpleSQL_dynamic'\n\n err = _do_test_raw(\"\"\"\n x.createStatement(\"foo \" + y);\n \"\"\")\n assert err.warnings[0]['id'][-1] == 'executeSimpleSQL_dynamic'\n\n err = _do_test_raw(\"\"\"\n x.createAsyncStatement(\"foo \" + y);\n \"\"\")\n assert err.warnings[0]['id'][-1] == 'executeSimpleSQL_dynamic'", "def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)", "def lab7_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def test_statement_initialized_from_named_arguments_and_order_is_insignficant():\n assert Statement(\n shape_id=\"@photo\", prop_id=\"dcterms:creator\", value_type=\"URI\"\n ) == Statement(prop_id=\"dcterms:creator\", shape_id=\"@photo\", value_type=\"URI\")", "def create_statement(self):\n query = self.commands.get_table_create_statement(self.name)\n if self.db.table_exists(self.name):\n statement = self.execute(query)[0][0]\n statement = re.sub('\\s+', ' ', statement)\n return statement\n raise ValueError('Table does not exist, no create statement')", "def lab7_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def test_new():\n from qiskit import BasicAer\n from qiskit.aqua.algorithms import Grover\n from qiskit.aqua.components.oracles import LogicalExpressionOracle\n\n expr = \"your logical expression goes here\"\n algorithm = Grover(LogicalExpressionOracle(expr))\n backend = BasicAer.get_backend('qasm_simulator')\n result = algorithm.run(backend, seed=101110)\n print(result)", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def lab7_q1():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab9_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab8_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab9_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"" ]
[ "0.5922423", "0.58748436", "0.57381195", "0.5712238", "0.5536309", "0.54875225", "0.5401516", "0.5340367", "0.5328818", "0.5325756", "0.5258779", "0.5224878", "0.51681894", "0.5089125", "0.50795174", "0.50786793", "0.5077896", "0.50717795", "0.50620943", "0.50568455", "0.50526536", "0.503532", "0.50326586", "0.50294507", "0.50277054", "0.50262773", "0.5017911", "0.5017095", "0.5013397", "0.5011584" ]
0.65338284
0
Test the properties homepage.
def test_properties_index(self): result = self.client.get('/') self.assertEqual(result.status, '200 OK') self.assertIn(b'Welcome', result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_property_page(self):\n self.property_page.proceed_to_property_page()\n\n \"\"\"Step2 - Check rooms section\n Exp2 - Property page opened \"\"\"\n self.property_page.check_rooms_section()\n\n \"\"\"Step3 - Check other section\n Exp3 - Each item works well \"\"\"\n self.property_page.check_other_section()", "def test_properties_get(self):\n pass", "def test_home_page(self):\n\n self.browser.get('http://localhost:8000/index.html')\n\n # there is a page title defined by <title></title> on the home page\n # check it\n\n self.assertIn('Stability within Movement',self.browser.title)\n\n # You will have an image for your home page I am assuming.\n # Put the name of your image here in place of homebrew.png\n # In general this is how we check for images on a page.\n\n # The user sees an image of sun hitting the Washington Monument\n\n m=self.browser.find_element_by_tag_name('img')\n self.assertIn('help.jpg',m.get_attribute('src'))\n\n a=self.browser.find_element_by_id('sun')\n a.click()\n\n self.assertIn('sun',self.browser.title)\n\n h=self.browser.find_element_by_tag_name('h1')\n\n m=self.browser.find_element_by_tag_name('img')\n\n # the user goes back to the home page\n # self.browser.back()\n self.browser.get('http://localhost:8000/index.html')\n\n # the user sees at the bottom of the page a link to credits\n l=self.browser.find_element_by_link_text('Credits')\n\n # the user clicks on the credits link\n l.click()\n # and sees the credits.html page\n a=self.browser.current_url\n self.assertIn(\"credits.html\",a)", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_serve_user_properties(self):\n pass", "def test_show_on_homepage(self) -> None:\n self.assert_show_on_homepage(apps.wakeup.main.Controller)", "def test_homepage(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Homepage\", result.data)", "def test_home(self):\n\t\tresponse = self.client.get('/')\n\t\tself.assertContains(response, 'Home Page', 1, 200)", "def test_app_properties(s):\n simple_app = s['simple-app']\n derived_app = s['derived-app']\n\n assert simple_app.url == (s.url + '/simple-app')\n assert simple_app.title == \"Simple App\"\n assert derived_app.url == (s.url + '/derived-app')\n assert derived_app.title == \"Derived App\"", "def test_list_properties(self):\n pass", "def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)", "def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))", "def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data", "def test_home(self):\n response = self.client.get('/')\n self.assertContains(response, 'Home Page', 1, 200)", "def test_properties_distribution_get(self):\n pass", "def test_homepage_it(self):\n\n self.driver.get(self.url_ + '/?hl=it')\n\n title_present = EC.text_to_be_present_in_element(\n (By.XPATH, '//*[@id=\"main-nav\"]/div/div[1]/a'), 'Data Commons')\n WebDriverWait(self.driver, self.TIMEOUT_SEC).until(title_present)\n\n hero_msg = self.driver.find_elements_by_class_name('lead')[0]\n self.assertTrue(\n 
hero_msg.text.startswith(\n 'Data Commons è un repository di conoscenza aperto che combina i dati provenienti'\n ))\n\n explore_callout_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/p')\n self.assertTrue(\n explore_callout_msg.text.startswith(\n 'Abbiamo pulito ed elaborato i dati al tuo posto, così non dovrai farlo tu.'\n ))\n\n nyc_health = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[1]/ul/li[2]/a')\n self.assertEqual(nyc_health.text, 'Salute a New York, New York')\n self.assertEqual(nyc_health.get_attribute('href'),\n self.url_ + '/place/geoId/3651000?topic=Health&hl=it')\n\n schema_org = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[5]/ul/li[2]')\n self.assertEqual(schema_org.text,\n 'Progetto open source realizzato con Schema.org.')\n\n more_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[3]/ul/li[4]/a')\n self.assertEqual(more_msg.text, 'altro…')", "def test_homepage(self):\n \n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"What type of user are you?\", result.data)", "def test_home_view_one_object(self):\n self.create_obj()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('42 Coffee Cups Test Assignment', response.content)\n self.assertIn('Name', response.content)\n self.assertIn('Last name', response.content)\n self.assertIn('Date of birth', response.content)\n self.assertIn('bio', response.content)\n self.assertIn('Email', response.content)\n self.assertIn('Jabber', response.content)\n self.assertIn('Andrei', response.content)\n self.assertIn('Herasko', response.content)\n self.assertIn('Feb. 23, 1998', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('ander2299', response.content)", "def test_home_page(self):\r\n url = reverse('home')\r\n response = self.client.get(url)\r\n\r\n self.assertEqual(response.status_code, 200)", "def test_home(self):\n\n response = self.client.get(reverse('home'))\n\n assert response.status_code == 200", "def test_fundamental_view_properties(self):\n response = self.client.get(\n reverse('users:profile', kwargs={'pk' : self.u.pk})\n )\n title = BeautifulSoup(response.content, features='html.parser').find('title').getText().strip().replace('\\n', '')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.resolver_match.func.view_class, views.ProfileView)\n self.assertEqual(title, 'Profile №%d \\\\ Chattings' % self.u.id)", "def test_homepage_en(self):\n\n self.driver.get(self.url_ + '/')\n\n title_present = EC.text_to_be_present_in_element(\n (By.XPATH, '//*[@id=\"main-nav\"]/div/div[1]/a'), 'Data Commons')\n WebDriverWait(self.driver, self.TIMEOUT_SEC).until(title_present)\n\n hero_msg = self.driver.find_elements_by_class_name('lead')[0]\n self.assertTrue(\n hero_msg.text.startswith(\n 'Data Commons is an open knowledge repository'))\n\n explore_callout_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/p')\n self.assertTrue(\n explore_callout_msg.text.startswith(\n 'We cleaned and processed the data so you don\\'t have to'))\n\n nyc_health = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[1]/ul/li[2]/a')\n self.assertEqual(nyc_health.text, 'New York City, NY Health')\n self.assertEqual(nyc_health.get_attribute('href'),\n self.url_ + 
'/place/geoId/3651000?topic=Health')\n\n schema_org = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[5]/ul/li[2]')\n self.assertEqual(schema_org.text,\n 'Open sourced, built using Schema.org.')\n\n more_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[3]/ul/li[4]/a')\n self.assertEqual(more_msg.text, 'more ...')", "def test_homepage(self):\r\n\r\n result = self.client.get(\"/\")\r\n self.assertIn(b\"Welcome!\", result.data)", "def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)", "def test_venue_list_properties(self, actual):\n print(actual(self.venue_list_page))\n self.assertTrue(actual(self.venue_list_page))", "def test_properties_evolution_get(self):\n pass", "def test_properties_stats_get(self):\n pass", "def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template" ]
[ "0.7226637", "0.6801965", "0.6606398", "0.65856403", "0.6486094", "0.6486094", "0.6480849", "0.6455193", "0.6366206", "0.6360378", "0.6354731", "0.6353792", "0.6351547", "0.6343604", "0.63424563", "0.6332773", "0.6328751", "0.63285375", "0.63134605", "0.63006544", "0.6274684", "0.6257536", "0.6248984", "0.6238399", "0.6212808", "0.62011266", "0.6191323", "0.61688626", "0.6080396", "0.6072823" ]
0.7332575
0
Test the new offer creation page.
def test_offers_new(self): result = self.client.get('/offers_new') self.assertEqual(result.status, '200 OK') self.assertIn(b'Make an Offer', result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def test_submit_offer(self, mock_insert):\n result = self.client.post('offers_show', data=sample_form_data)\n\n # After submitting, should redirect to the offers_show page.\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)", "def helper_test_create_vessel(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()", "def test_new_equipment_page(self):\n create_user()\n login(self.app, 'me1', 'password')\n\n response = self.app.get('/new_equipment', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n response_text = response.get_data(as_text=True)\n self.assertIn('New Equipment', response_text)\n self.assertIn('Name', response_text)\n self.assertIn('Quantity', response_text)\n self.assertIn('Submit', response_text)\n\n self.assertNotIn('Calendar ', response_text)\n self.assertNotIn('Logout', response_text)\n self.assertNotIn('Login', response_text)\n self.assertNotIn('Sign up', response_text)", "def test_get_offers(self):\n pass", "def test_new(self):\n result = self.client.get('/sneakers/new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New Sneaker', result.data)", "def test_show_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def _create_offer(\n org,\n offer_item_name='Test Item',\n offer_limit=None,\n currents_share=25,\n is_master=False\n):\n offer_item = Item(name=offer_item_name)\n offer_item.save()\n\n offer = Offer(\n org=org,\n item=offer_item,\n currents_share=currents_share,\n is_master=is_master\n )\n\n if offer_limit:\n offer.limit = offer_limit\n\n offer.save()\n\n return offer", "def test_alert_create(self):\n pass", "def test_publication_view_create(self):\n \n test_response = self.client.get('/papers/new/')\n self.assertEqual(test_response.status_code, 200)\n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'publication_form.html')", "def test_create(self):\n pass", "def test_create_page_with_help_box(self):\n\n button = PageButton.objects.create(**_button_data)\n help_block = PageHelpBoxBlock.objects.create(\n button=button, **_help_box_data)\n page = 
Page.objects.create(help_box_block=help_block, **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(page.help_box_block.button, button)\n self.assertIn('text', response.context)\n self.assertIn('button', response.context)", "def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n self.assertShortURLCreated(response)", "def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)", "def test_create_provider(self):\n url = reverse('provider-list')\n data = {'name': 'foo'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Provider.objects.count(), 1)", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_create_item(self):\n\n url = reverse('stock-item-create')\n\n response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(url, {'part': 999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from a valid item, valid location\n response = self.client.get(url, {'location': 1, 'copy': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from an invalid item, invalid location\n response = self.client.get(url, {'location': 999, 'copy': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)", "def test_subscribe_offer(self):\n pass", "def test_create_contract_admin_page(self):\n # asserts that there aren't any properties in changelist view\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates the contract\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks it shows in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn(str(self.contract_one_data['rent']), content)", "def test_perform_create(self):\n\n response = self.client.post(reverse('action-list'), data=self.data)\n self.assertEqual(response.status_code, 
status.HTTP_201_CREATED)\n self.assertEqual(response.data['name'], self.data['name'])\n self.assertTrue(len(response.data['institution']), self.data['institution'])", "def test_commentary_view_create(self):\n \n test_response = self.client.get('/papers/commentary/new')\n self.assertEqual(test_response.status_code, 200)\n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'commentary-form.html') \n self.assertTemplateUsed(test_response, 'analytics_tracking.html')", "def test_get_create_page(self):\n\n url = reverse('create-notification')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_get_create_page(self):\n\n url = reverse('create-notification')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def goto_create(self):\n\n self.create.click()", "def test_detail_views(self):\n obj = self.create_post(title='Some new title for new test')\n response = self.client.get(obj.get_absolute_url())\n # TODO You need to check that the description and title are present in the html returned from the server Dilshad\n self.assertEqual(response.status_code, 200)", "def test_create_book(self):\n url = reverse('book-list')\n data = {'isbn':'96712116-1',\n 'title':'New Star',\n 'author_last_name':'Khaled',\n 'author_first_name':'Roshdy',\n 'page_count':250,\n 'description':'the book description'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_new_event_page(self):\n create_user()\n login(self.app, 'me1', 'password')\n\n response = self.app.get('/new_event/2/2021-05-13/11:00-13:00', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n response_text = response.get_data(as_text=True)\n self.assertIn('New Event', response_text)\n self.assertIn('Title', response_text)\n self.assertIn('Date', response_text)\n self.assertIn('Timeslot', response_text)\n self.assertIn('Color', response_text)\n self.assertIn('Submit', response_text)\n\n self.assertNotIn('Calendar ', response_text)\n self.assertNotIn('Logout', response_text)\n self.assertNotIn('Login', response_text)\n self.assertNotIn('Sign up', response_text)", "def test_offers_show_every(self):\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)", "def test_template(self):\n # Setup\n # Test\n response = self.client.get(self.WIZARD_URL)\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"publish/feed_form.html\")" ]
[ "0.72297406", "0.70225155", "0.65767425", "0.6547002", "0.6517671", "0.65035397", "0.6491894", "0.6482098", "0.6441887", "0.6362172", "0.63465416", "0.63443273", "0.6332997", "0.6332686", "0.6323293", "0.63143426", "0.63076407", "0.6299864", "0.6273754", "0.622774", "0.61926955", "0.6188135", "0.6180193", "0.6180193", "0.6173543", "0.61542845", "0.61533606", "0.6152854", "0.6152158", "0.6131194" ]
0.80279493
0
Test showing the page of all offers.
def test_offers_show_every(self): result = self.client.get('/offers_show_every') self.assertEqual(result.status, '200 OK') self.assertIn(b'Offers', result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_offers(self):\n pass", "def test_show_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def test_basic_render_index_page(self):\n url = reverse('shipping.views.index')\n response = self.client.get(url)\n eq_(response.status_code, 200)\n self.assert_all_embeds(response.content)", "def test_pagination(self):\n self.check_pagination()", "def test_get_dealer_landing_page(self):\n pass", "def test_bill_page(self):\n bill = self.fx.BillData.food\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n self.assertIn(bill.type.name, self.html)\n # Check if \"na\" stage is in page\n self.assertIn(\"stage2\", self.html)\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)", "def test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})", "def test_tags_browse_click_page_links_check_items_displayed(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n\n # change the display limit to 5\n new_display_limit = '5'\n po.form.footer.display_limit(new_display_limit)\n\n # get the updated display limit\n display_limit = int(po.form.footer.display_limit())\n\n assert display_limit == int(new_display_limit), \\\n \"updated display limit does not match the display\" \\\n + \" limit set by user: updated display limit =\" \\\n + \" '%s', user set display limit = '%s'\" \\\n % (display_limit,new_display_limit)\n\n # get the updated page number links\n page_numbers = po.get_link_page_numbers()\n\n page_url = po.current_url()\n\n for p in page_numbers:\n # click the page number link\n po.goto_page_number(p)\n\n po2 = self.catalog.load_pageobject('TagsBrowsePage')\n\n # get the number of items that should be displayed\n # according to the pagination counts\n (start,end,total) = po2.get_pagination_counts()\n num_pag = (end-start+1)\n\n # get the number of items that are actually displayed\n num_rows = po2.form.search_results.num_rows()\n\n # compare that is should be displayed to what is displayed\n assert num_pag == num_rows, \\\n \"after clicking page link #%s on %s,\" % (p,page_url) \\\n + \" the number of items displayed does not match the\" \\\n + \" number of items listed in the pagination counts:\" \\\n + \" displayed = %s, start = %s,\" % (num_rows,start) \\\n + \" end = %s, end-start+1 (what should be displayed) = %s\" \\\n % (end,num_pag)\n\n # return back to our original page\n self.browser._browser.back()", "def test_offers_new(self):\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)", "def test_shelf_page(self, *_):\n view = views.Shelf.as_view()\n shelf = self.local_user.shelf_set.first()\n request = self.factory.get(\"\")\n request.user = self.local_user\n with 
patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = False\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, TemplateResponse)\n validate_html(result.render())\n self.assertEqual(result.status_code, 200)\n\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = True\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, ActivitypubResponse)\n self.assertEqual(result.status_code, 200)\n\n request = self.factory.get(\"/?page=1\")\n request.user = self.local_user\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = True\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, ActivitypubResponse)\n self.assertEqual(result.status_code, 200)", "def test_tags_view_click_page_links_check_items_displayed(self,tag_with_items):\n\n self.tag_name = tag_with_items\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n po.search_for_content([self.tag_name])\n\n po = self.catalog.load_pageobject('TagsViewPage')\n\n # change the display limit to 5\n new_display_limit = '5'\n po.form.footer.display_limit(new_display_limit)\n\n # get the updated display limit\n display_limit = int(po.form.footer.display_limit())\n\n assert display_limit == int(new_display_limit), \\\n \"updated display limit does not match the display limit\" \\\n + \" set by user: updated display limit =\" \\\n + \" '%s', user set display limit = '%s'\" \\\n % (display_limit,new_display_limit)\n\n # get the updated page number links\n page_numbers = po.get_link_page_numbers()\n\n page_url = po.current_url()\n\n for p in page_numbers:\n # click the page number link\n po.goto_page_number(p)\n\n po2 = self.catalog.load_pageobject('TagsViewPage')\n\n # get the number of items that should be displayed\n # according to the pagination counts\n (start,end,total) = po2.get_pagination_counts()\n num_pag = (end-start+1)\n\n # get the number of items that are actually displayed\n num_rows = po2.form.search_results.num_rows()\n\n # compare that is should be displayed to what is displayed\n assert num_pag == num_rows, \\\n \"after clicking page link #%s on %s,\" \\\n % (p,page_url) \\\n + \" the number of items displayed does not match the\" \\\n + \" number of items listed in the pagination counts:\" \\\n + \" displayed = %s, start = %s, end = %s,\" \\\n % (num_rows,start,end) \\\n + \" end-start+1 (what should be displayed) = %s\" \\\n % (num_pag)\n\n # return back to our original page\n self.browser._browser.back()", "def _test_one_page(self, page=1, **kwargs):\n params = {\"page\": page}\n params.update(kwargs)\n response = self.client.get(self.url, params)\n\n # Check the requested page number is within the proper range\n if page > self.max_page_num:\n self.assertEqual(response.status_code, 404)\n return\n\n # Standard response\n self.assertEqual(response.status_code, 200)\n\n # Publication list\n publication_block = '<li class=\"publication-list-year\">'\n start = self.paginate_by * (page - 1)\n end = self.paginate_by * page\n if end > self.n_publications:\n end = self.n_publications\n expected_count = len(set(self.publications_years[start:end]))\n self.assertContains(response, publication_block, count=expected_count)\n\n publication_block = '<li class=\"publication\">'\n self.assertContains(response, publication_block, count=end - start)\n\n # Pagination\n 
self.assertTrue(response.context[\"is_paginated\"])\n\n pagination_block = '<div class=\"pagination-centered\">'\n self.assertContains(response, pagination_block)\n\n pagination_block = '<a href=\"\">%d</a>' % page\n self.assertContains(response, pagination_block)", "def test_get_urls(self):\r\n OFFER_URLS = [\"http://olx.pl/offer1\",\r\n \"http://olx.pl/offer2\",\r\n \"http://olx.pl/offer3\",\r\n \"http://olx.pl/offer4\",\r\n \"http://olx.pl/offer5\",\r\n \"http://olx.pl/offer6\"]\r\n\r\n SEARCH_QUERY = \"http://SEARCH_QUERY_URL?\"\r\n \r\n for url in OfferSearcher.search(SEARCH_QUERY, 6, WebDocumentFetcherStub):\r\n self.assertTrue(url in OFFER_URLS, \"Unexpected offer url fetched: %s\" % url)\r\n OFFER_URLS.remove(url)\r\n \r\n self.assertEquals(0, len(OFFER_URLS), \"Not all offer urls fetched: %s\" % OFFER_URLS)", "def _test_filtering(self, **kwargs):\n params = dict()\n params.update(kwargs)\n response = self.client.get(self.url, params)\n\n # Standard response\n self.assertEqual(response.status_code, 200)\n\n # Display at list a publication\n publication_block = '<li class=\"publication\">'\n self.assertContains(response, publication_block)", "def test_page(self):\n result = self.test_client.page\n\n assert result == 1", "def test_get_html_paginated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'video_upload_pagination')", "def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)", "def test_airbnb_scenario(self):\r\n self.main_page_object = mainPage(self.driver)\r\n self.main_page_object.select_experiences()\r\n self.main_page_object.search_experiences_by_city()\r\n\r\n self.experience_page_object = experiencesPage(self.driver)\r\n self.experience_page_object.add_date()\r\n self.experience_page_object.select_guests()\r\n\r\n self.experience_page_object.price_filter()\r\n self.experience_page_object.search_result1()\r\n self.experience_page_object.search_result2()\r\n\r\n # self.experience_page_object.assert_results()\r", "def test_book_pages(self):\n url = reverse(\"book:book-detail\", kwargs={\"slug\": self.book.slug})\n response = self.client.get(url)\n assert response.status_code == 200\n assert not \"book_pages\" in json.loads(response.content)", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def test_all_available(self):\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(context[\"username\"], self.user)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n self.assertEqual(context[\"display_first_week\"], True)\n\n self.assertEqual(bookings[0].calendar_week,\n self.current_week.calendar_week)\n self.assertEqual(bookings[1].calendar_week,\n self.current_week.calendar_week + 1)\n self.assertEqual(bookings[2].calendar_week,\n self.current_week.calendar_week + 2)\n self.assertEqual(bookings[3].calendar_week,\n self.current_week.calendar_week + 3)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n self.assertEqual(type(block), 
BlockAvailable)", "def test_avilable_articles_list(self):\n response = self.c.get('/articles/available/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.template_name, ['articles/article_list.html'])\n self.assertContext(response, {\n 'all_columns':['Project', 'Keywords', 'Writer', 'Reviewer', 'Status', 'Category', 'Length', 'Priority', 'Tags'],\n 'sidebar_links': (\n ('Articles', (('Unavailable', 1, False), ('Available', 11, False), ('Assigned', 3, False), ('Claimed', 1, False), ('Submitted', 2, False), ('Approved', 2, False), ('Rejected', 1, False), ('Published', 1, False))), \n ('Writers', (('My Writers', 4, False), ('Writers Pending', 1, False), ('Writers Avail.', 2, True), ('Writer Groups', 1, True))), \n ('Reviewers', (('My Reviewers', 1, False), ('Reviewers Pending', 1, False), ('Reviewers Avail.', 2, True), ('Reviewer Groups', 1, True)))\n ),\n # 'reviewer_filter_counts':{'Reviewers Pending': (1, False), 'Reviewers Avail.': (2, True), 'Reviewer Groups': (1, True), 'My Reviewers': (1, False)},\n 'heading':'Available Articles',\n 'is_paginated': False,\n # 'article_filter_counts':{'Available': (7, False), 'Assigned': (3, False), 'Unavailable': (1, False), 'Published': (1, False), 'Claimed': (1, False), 'Approved': (2, False), 'Submitted': (2, False), 'Rejected': (1, False)},\n 'hidden_columns':['Reviewer', 'Status', 'Category', 'Length', 'Priority', 'Tags'],\n 'page_obj': None,\n 'all_items_count':11,\n 'paginator': None,\n # 'writer_filter_counts':{'My Writers': (1, False), 'Writers Pending': (1, False), 'Writers Avail.': (2, True), 'Writer Groups': (1, True)},\n 'article_list':Article.objects.filter(pk__in=[2,3,4,5,8,9,15,35, 36, 37, 38]),\n 'object_list':Article.objects.filter(pk__in=[2,3,4,5,8,9,15,35, 36, 37, 38]),\n 'view': InstanceOf(AvailableArticles),\n })", "def test_dashboards_v2_show(self):\n pass", "def test_paging(mocker, testclient):\n # Patch to return list_alerts json data\n with open(\"./test_data/list_alerts_paged.json\") as list_alerts_paged:\n list_alerts_response = json.load(list_alerts_paged)\n with open(\"./test_data/list_alerts_empty.json\") as list_alerts_empty:\n mocker.patch.object(Client, \"_http_request\", side_effect=[\n list_alerts_response,\n json.load(list_alerts_empty),\n ])\n data = testclient.get_paged(40, url_suffix=\"/not_real\", method=\"GET\")\n\n assert len(data) == 29\n calls = [\n call(url_suffix=\"/not_real\", method=\"GET\"),\n call(full_url=list_alerts_response.get(\"paging\").get(\"next\"),\n url_suffix=\"/not_real\",\n method=\"GET\",\n )\n ]\n Client._http_request.assert_has_calls(calls)", "def test_get_items_page(self, mock_requests_get):\n result = resources.get_items_page(1, \"a\", 1)\n\n assert result.total == 97\n\n item = result.items[0]\n assert item.id == 1\n assert item.name == \"Thing\"\n assert item.description == \"A thing\"\n\n assert item.current.price == 100\n assert item.today.price == 110\n\n assert item.members is True\n\n item = result.items[1]\n\n assert item.current.price == 11300\n assert item.today.price == 24400000\n\n assert item.members is False\n\n item = result.items[2]\n\n assert item.current.price == 1800000000\n assert item.today.price == 43657", "def test_mineral_list_view(self):\n resp = self.client.get(reverse('minerals:list'))\n self.assertEqual(resp.status_code, 200)\n for mineral in self.minerals:\n self.assertIn(self.mineral, resp.context['minerals'])\n self.assertContains(resp, self.mineral.short_name)\n self.assertTemplateUsed(resp, 'minerals/mineral_list.html')", "def 
test_retain_query(self):\n self.assertTrue('?per_page=15' in str(self.response.content))", "def test_public_pages_load(self):\r\n pages = (\r\n reverse('login'),\r\n reverse('signup'),\r\n )\r\n for page in pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, 200)", "def test_index_view_with_no_questions(self):\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No polls are available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], \n [])", "def test_index_view_with_no_questions(self):\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No polls are available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])" ]
[ "0.72028357", "0.63975585", "0.6307497", "0.6142342", "0.6118828", "0.6108139", "0.6106805", "0.61000824", "0.60947084", "0.6088211", "0.6061273", "0.6035597", "0.6009674", "0.5989309", "0.59431523", "0.59197736", "0.5881938", "0.5876856", "0.5876462", "0.58678347", "0.58283126", "0.58113295", "0.5802013", "0.578348", "0.57755405", "0.57736874", "0.5723428", "0.5708474", "0.5705501", "0.57044667" ]
0.78508234
0
Test submitting a new offer. The entry point for the route is called offers_show_all.
def test_submit_offer(self, mock_insert): result = self.client.post('offers_show', data=sample_form_data) # After submitting, should redirect to the offers_show page. self.assertEqual(result.status, '302 FOUND') mock_insert.assert_called_with(sample_offer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_offers_new(self):\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)", "def test_offers_show_every(self):\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)", "def test_get_offers(self):\n pass", "def test_show_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def test_sell_ticket_posted(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket info, should be redirected to / route\n self.type('#name_sell', \"t1\")\n self.type(\"#price_sell\", \"100\")\n self.type(\"#quantity_sell\", \"2\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n self.assert_element(\"#welcome-header\")\n # Assert that the valid error message is shown.\n self.assert_text(\"Hi test_frontend\", \"#welcome-header\")", "def offers_collection(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n task = Task.objects.get(id=data.get(\"taskId\", \"\"))\n price = data.get(\"price\", \"\")\n tasker = User.objects.get(username=data.get(\"tasker\", \"\"))\n message = data.get(\"message\", \"\")\n\n offer = Offer(\n task=task,\n price=price,\n tasker=tasker,\n message=message\n )\n offer.save()\n return JsonResponse({\"message\": \"Offer created successfully\"}, status=201)", "def test_post_query_reply_offers(self):\n pass", "def test_offers_edit(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)", "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def test_edit_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def test_give_feedback_handler(self):\n self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)\n\n # Load demo exploration\n EXP_ID = '0'\n exp_services.delete_demo('0')\n exp_services.load_demo('0')\n\n # Viewer opens exploration\n self.login(self.VIEWER_EMAIL)\n exploration_dict = self.get_json(\n '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, EXP_ID))\n state_name_1 = exploration_dict['exploration']['init_state_name']\n\n # Viewer gives 1st feedback\n self.post_json(\n '/explorehandler/give_feedback/%s' % EXP_ID,\n {\n 'state_name': state_name_1,\n 'feedback': 'This is a feedback message.',\n }\n )\n\n # Viewer 
submits answer '0'\n result_dict = self.submit_answer(EXP_ID, state_name_1, '0')\n state_name_2 = result_dict['state_name']\n\n # Viewer gives 2nd feedback\n self.post_json(\n '/explorehandler/give_feedback/%s' % EXP_ID,\n {\n 'state_name': state_name_2,\n 'feedback': 'This is a 2nd feedback message.',\n }\n )\n self.logout()", "def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def offer_element(request, offer_id):\n try:\n offer = Offer.objects.get(id=offer_id)\n except Offer.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = OfferSerializer(offer)\n return Response(serializer.data)\n\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n \n offer.price = data.get(\"price\", \"\")\n offer.message = data.get(\"message\", \"\")\n \n offer.save()\n return JsonResponse({\"message\": \"Offer updated successfully\"}, status=204)\n\n elif request.method == \"DELETE\":\n offer.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_view_booking(client):\n response = client.post(\n BOOKING_API_URL + '/view',\n data=dict(\n booking_id=1\n ),\n content_type='multipart/form-data'\n )\n\n assert b'Booking ID: 1' in response.data\n assert b'Car ID: </strong> 1' in response.data\n assert b'User ID: </strong> 1' in response.data\n assert b'Pickup time: </strong> ' + \\\n str.encode(PICKUP_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'Return time: </strong> ' + \\\n str.encode(RETURN_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'Canceled' in response.data # previous test canceled this booking.", "def test_create_single_poll_submission(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_subscribe_offer(self):\n pass", "def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_submit_for_endorsement(self):", "def offers(self, offers):\n\n self._offers = offers", "def test_sync_biz_to_sugar_offer(self):\n LOG.debug('test_sync_biz_to_sugar_offer')\n #business = Business.objects.get(id=114)\n consumer = Consumer.objects.get(id=300)\n consumer.first_name = 'Danielle'\n consumer.last_name = 'Dongo'\n consumer.save()\n offer = Offer.objects.get(id=300)\n offer.create_datetime = datetime.datetime.now()\n offer.save()\n sync_business_to_sugar(offer=offer, sugar=self.sugar)\n module = \"Accounts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n self.assertTrue(sugar_list[0]['id'] != -1)\n self.assertEquals(sugar_list[0]['email1'], \n offer.business.advertiser.email)", "def test_store(self):\n self.selenium.get('{}/store'.format(self.live_server_url))", "def test_01_service_offerings(self):\n # Validate the following\n # 1. Create a project.\n # 2. List service offerings for the project. All SO available in the\n # domain can be used for project resource creation.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n self.debug(\n \"Deploying VM instance for project: %s & service offering: %s\" % (\n project.id,\n self.service_offering.id\n ))\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n return", "def test_submit_form_using_valid_data():", "def test_editing_supplies_admin(self):\n id = self.testsupply.id\n request = self.factory.put(\n '/api/supplies/1/', {'name': '3d printer', 'state': 'broken'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyDetailsView.as_view()(request, pk=id)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Supply.objects.get(id=id).state, 'broken')", "def test_request_form_successful(self):\n response = self.client.get(reverse(\n 'form', kwargs={'slug': self.agency.slug}))\n self.assertContains(response, self.agency.name)", "def test_wallets_post(self):\n pass", "def helper_test_create_vessel(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "async def test_accept_agency_offer(client):\n headers = { \n 'Accept': 'text/plain',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='PUT',\n path='/vms/api/v1/bookingRequests/{booking_request_id}/offers/{offer_id}/accept'.format(booking_request_id='booking_request_id_example', offer_id='offer_id_example'),\n headers=headers,\n )\n assert 
response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_faq_view(self):\n response = self.client.get(url_for('main.faq'))\n self.assertEqual(response.status_code, 200)" ]
[ "0.7137048", "0.673148", "0.67147416", "0.6314298", "0.6304351", "0.61012447", "0.6049734", "0.6017124", "0.59592426", "0.58525443", "0.57838637", "0.573978", "0.5655164", "0.5545433", "0.5451833", "0.54337096", "0.5407538", "0.5388778", "0.53809834", "0.5359097", "0.5352999", "0.5344542", "0.53405815", "0.53341585", "0.5298631", "0.52787364", "0.5276311", "0.5272971", "0.52545434", "0.5251726" ]
0.74099815
0
Test showing a single offer.
def test_show_offer(self, mock_find):
    mock_find.return_value = sample_offer

    result = self.client.get(f'/offers/{sample_offer_id}')
    self.assertEqual(result.status, '200 OK')
    self.assertIn(b'Description', result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_offers_show_every(self):\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)", "def test_get_offers(self):\n pass", "def test_offers_new(self):\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)", "def test_edit_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def test_offers_edit(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)", "def test_show_rating(self):\n self.assertEqual(self.show.rating, None)", "def offer(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"offer\")", "def test_display_review(self):\n\n result = self.client.get(\"/brand/P87985432\")\n self.assertIn(b\"ever ever\", result.data)", "def test_submit_offer(self, mock_insert):\n result = self.client.post('offers_show', data=sample_form_data)\n\n # After submitting, should redirect to the offers_show page.\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)", "def testShow(self):\n response = requests.get(url=self.url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 200, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertIn('name', json_data, MISSING_NAME_ATTR_MSG)\n self.assertIn('number_rooms', json_data, MISSING_ROOM_NB_ATTR_MSG)\n self.assertIn('number_bathrooms', json_data,\n MISSING_BATHROOM_NB_ATTR_MSG)\n self.assertIn('price_by_night', json_data,\n MISSING_PRICE_BY_NIGHT_ATTR_MSG)\n self.assertIn('user_id', json_data, MISSING_USER_ID_ATTR_MSG)\n self.assertIn('city_id', json_data, MISSING_CITY_ID_ATTR_MSG)\n self.assertIn('created_at', json_data, MISSING_CREATED_AT_ATTR_MSG)\n self.assertIn('updated_at', json_data, MISSING_UPDATED_AT_ATTR_MSG)\n self.assertIn('__class__', json_data, MISSING_CLASS_ATTR_MSG)\n self.assertEqual(json_data['name'], self.place.name)\n self.assertEqual(json_data['number_rooms'], self.place.number_rooms)\n self.assertEqual(json_data['number_bathrooms'],\n self.place.number_bathrooms)\n self.assertEqual(json_data['price_by_night'],\n self.place.price_by_night)\n self.assertEqual(json_data['user_id'], self.place.user_id)\n self.assertEqual(json_data['city_id'], self.place.city_id)", "def test_show_sneaker(self, mock_find):\n mock_find.return_value = sample_sneaker\n\n result = self.client.get(f'/sneakers/{sample_sneaker_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'sneakers', result.data)", "def test_subscribe_offer(self):\n pass", "def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')", "def test_whom_will_show(self):\n must_be_first = About_me.objects.first()\n response = self.client.get(reverse('index'))\n self.assertContains(response, must_be_first.name)", "def makes_offer(self) -> object:\n return self._makes_offer", "def test_show_bag(self):\n response = 
self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')", "def offer_element(request, offer_id):\n try:\n offer = Offer.objects.get(id=offer_id)\n except Offer.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = OfferSerializer(offer)\n return Response(serializer.data)\n\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n \n offer.price = data.get(\"price\", \"\")\n offer.message = data.get(\"message\", \"\")\n \n offer.save()\n return JsonResponse({\"message\": \"Offer updated successfully\"}, status=204)\n\n elif request.method == \"DELETE\":\n offer.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_show_animes(self, mock_find):\n mock_find.return_value = sample_AnimeArt\n\n result = self.client.get(f'/animes/{sample_AnimeArt_id}')\n self.assertEqual(result.status, '200 OK')", "def test_listOffering(self):\n name = 'offering-name'\n self.userbase('install')\n realm = IRealm(self.store)\n substoreItem = SubStore.createNew(self.store, ('app', name))\n realm.addAccount(name, None, None, internal=True,\n avatars=substoreItem)\n output = self.userbase('list')\n self.assertEqual(output, [name])", "def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)", "def test_dashboards_v2_show(self):\n pass", "def test_show(self):\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@alice\", str(response.data))", "def get_offer(request, offer_id):\n offer = get_object_or_404(Offers, pk=offer_id)\n serializer = OffersSerializer(offer)\n return JsonResponse(serializer.data, safe=False)", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_product_rate_plan_by_id(self):\n pass", "def test_counter_proposal_offer(self):\n pass", "def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def _create_offer(\n org,\n offer_item_name='Test Item',\n offer_limit=None,\n currents_share=25,\n is_master=False\n):\n offer_item = Item(name=offer_item_name)\n offer_item.save()\n\n offer = Offer(\n org=org,\n item=offer_item,\n currents_share=currents_share,\n is_master=is_master\n )\n\n if offer_limit:\n offer.limit = offer_limit\n\n offer.save()\n\n return offer", "def test_view_product_detail(self):\n product = sample_product(supplier_id=self.user)\n\n url = detail_url(product.id)\n res = self.client.get(url)\n\n serializer = ProductSerializer(product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_show_game(self, mock_find):\n mock_find.return_value = sample_game\n\n result = self.client.get(f'game/{sample_game_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Fable', result.data)" ]
[ "0.7261696", "0.6998221", "0.6710632", "0.64315236", "0.6270281", "0.6039189", "0.60301286", "0.59781045", "0.59634453", "0.5904044", "0.5892867", "0.5879203", "0.5841947", "0.5778148", "0.57217896", "0.5689361", "0.56728566", "0.5562918", "0.55520946", "0.5543369", "0.5541635", "0.5528956", "0.5526121", "0.55091083", "0.54959106", "0.5462855", "0.5460804", "0.5433011", "0.54142165", "0.5400492" ]
0.77325225
0
Test rendering of the edit offer form.
def test_offers_edit(self, mock_find):
    mock_find.return_value = sample_offer

    result = self.client.get(f'/offers/{sample_offer_id}/edit')
    self.assertEqual(result.status, '200 OK')
    self.assertIn(b'Edit This Offer', result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_edit_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def test_edit(self):\n # Test using the Trovebox class\n html = self.client.photo.edit(self.photos[0])\n self.assertIn(\"<form\", html.lower())\n\n # And the Photo object directly\n html = self.photos[0].edit()\n self.assertIn(\"<form\", html.lower())", "def edit_form():\n return template (\"edit\")", "def test_show_tag_edit(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(f\"/tags/{self.tag.id}/edit\")\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Edit Tag\", html)\r\n self.assertIn(self.tag.name, html)", "def test_publication_view_edit(self):\n \n test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/edit/')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('publication' in test_response.context) \n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'publication_form.html') \n self.assertEqual(test_response.context['publication'].pk, 1)\n self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')\n\n #verifies that a non-existent object returns a 404 error presuming there is no object with pk=2.\n null_response = self.client.get('/papers/not-a-real-paper/edit/')\n self.assertEqual(null_response.status_code, 404)", "def test_render_purchase_form_html(self, render):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n item1 = OrderItem(order=order1, user=student1, unit_cost=1.0, line_cost=1.0)\r\n item1.save()\r\n html = render_purchase_form_html(order1)\r\n ((template, context), render_kwargs) = render.call_args\r\n\r\n self.assertEqual(template, 'shoppingcart/cybersource_form.html')\r\n self.assertDictContainsSubset({'amount': '1.00',\r\n 'currency': 'usd',\r\n 'orderPage_transactionType': 'sale',\r\n 'orderNumber': str(order1.id)},\r\n context['params'])", "def test_editing_supplies_admin(self):\n id = self.testsupply.id\n request = self.factory.put(\n '/api/supplies/1/', {'name': '3d printer', 'state': 'broken'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyDetailsView.as_view()(request, pk=id)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Supply.objects.get(id=id).state, 'broken')", "def get_edit_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv", "def test_edit_button_appears(self):\n response = self.client.get(reverse('wagtailnews:index', kwargs={\n 'pk': self.index.pk}))\n self.assertContains(response, self.url)", "def render_form():", "def test_review_form(self):\n\n result = self.client.get(\"/brand/P87985432\")\n self.assertIn(b\"review_form\", result.data)", "def test_edit(test_case, widget):\n ensure_engine_exists()\n # Create schem\n Base.metadata.create_all(get_engine())\n # Create editing UI\n edit_ui = type(\"CustomEditUI\", (Ui_EditForm, PQSEditUI), {})\n ui, s, u = None, None, None\n # Switch test\n print(test_case)\n if test_case == \"add\":\n ui = edit_ui(User)\n elif 
test_case == \"edit-transient\":\n ui = edit_ui(User(email=\"[email protected]\", number=1))\n else:\n s = SessionFactory()\n if test_case == \"edit-pending\":\n u = User(email=\"[email protected]\", number=2)\n s.add(u)\n ui = edit_ui(u, s)\n elif test_case == \"edit-persistent\":\n u = User(email=\"[email protected]\", number=3,\n birthdate=datetime.date(1995, 9, 21))\n s.add(u)\n s.commit()\n ui = edit_ui(u, s)\n elif test_case == \"edit-persistent-wrong\":\n s.execute(\"INSERT INTO users (email, number, birthdate) VALUES (\" +\n \"\\\"persistent-wrongusers.org\\\", 4, \\\"2099-12-12\\\")\")\n s.commit()\n u = s.query(User).filter(\n User.email == \"persistent-wrongusers.org\").first()\n ui = edit_ui(u, s)\n # Create UI\n ui.setup_and_bind(widget, Ui_UserForm)\n return ui, s, u", "def test_edit_view(self):\n self.client.post(reverse('misago:admin:users:bans:new'), data={\n 'check_type': '0',\n 'banned_value': 'Admin',\n })\n\n test_ban = Ban.objects.get(banned_value='admin')\n form_link = reverse('misago:admin:users:bans:edit', kwargs={'pk': test_ban.pk})\n\n response = self.client.post(form_link, data={\n 'check_type': '1',\n 'banned_value': '[email protected]',\n 'user_message': 'Lorem ipsum dolor met',\n 'staff_message': 'Sit amet elit',\n 'expires_on': '',\n })\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '[email protected]')", "def test_commentary_view_edit(self):\n \n test_response = self.client.get('/papers/commentary/1/edit')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('commentary' in test_response.context) \n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'commentary-form.html') \n self.assertTemplateUsed(test_response, 'analytics_tracking.html')\n self.assertEqual(test_response.context['commentary'].pk, 1)\n self.assertEqual(test_response.context['commentary'].paper.__unicode__(), u'14-3-3 proteins: a number of functions for a numbered protein.') \n self.assertEqual(test_response.context['commentary'].comments, \"some comments for this fixture\") \n \n #verifies that a non-existent object returns a 404 error.\n null_response = self.client.get('/papers/commentary/9999/edit')\n self.assertEqual(null_response.status_code, 404)", "def test_edit_mdt(self):\n response = self.client.get(reverse('edit-mdt', args=[self.sample_type, self.mdt.id]))\n self.assertContains(response, self.proband.gel_id)\n self.assertEqual(response.status_code, 200)", "def test_get_edit(self):\n response = self.get('/study/edit/1')\n self.assertEqual(response.code, 200)\n self.assertNotEqual(str(response.body), \"\")", "def test_submit_offer(self, mock_insert):\n result = self.client.post('offers_show', data=sample_form_data)\n\n # After submitting, should redirect to the offers_show page.\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)", "def test_edit(self):\n\n\t\titem_id = mock_item()[0]\n\n\t\titem_data = {'title': 'Item Two', 'author': 'Author Two',\n\t\t\t'location': 'Location Two'}\n\t\tmodels.edit(item_id, item_data)\n\n\t\titem = models.item(item_id)\n\n\t\tself.assertEqual(item['title'], item_data['title'])\n\t\tself.assertEqual(item['author'], item_data['author'])\n\t\tself.assertEqual(item['location'], item_data['location'])", "def test_tab_preview_html(self):\r\n preview_url = 
'/xblock/{}/student_view'.format(self.test_tab.location)\r\n\r\n resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')\r\n self.assertEqual(resp.status_code, 200)\r\n resp_content = json.loads(resp.content)\r\n html = resp_content['html']\r\n\r\n # Verify that the HTML contains the expected elements\r\n self.assertIn('<span class=\"action-button-text\">Edit</span>', html)\r\n self.assertIn('<span class=\"sr\">Duplicate this component</span>', html)\r\n self.assertIn('<span class=\"sr\">Delete this component</span>', html)\r\n self.assertIn('<span data-tooltip=\"Drag to reorder\" class=\"drag-handle action\"></span>', html)", "def test_aid_edition_view(client, contributor, aid_form_data):\n\n aid = AidFactory(name='First title', author=contributor)\n # Anonymous, no access\n form_url = reverse('aid_edit_view', args=[aid.slug])\n res = client.get(form_url)\n assert res.status_code == 302\n\n # Logged contributor, access granted\n client.force_login(contributor)\n res = client.get(form_url)\n assert res.status_code == 200\n\n aids = Aid.objects.filter(author=contributor)\n assert aids.count() == 1\n\n aid_form_data['name'] = 'New title'\n res = client.post(form_url, data=aid_form_data)\n assert res.status_code == 302\n assert aids.count() == 1\n assert aids[0].name == 'New title'\n assert aids[0].author == contributor", "def test_public_component_preview_html(self):\r\n self.validate_preview_html(self.video, 'student_view',\r\n can_edit=True, can_reorder=True, can_add=False)", "def test_form_editing(self):\n update = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': True,\n }\n\n form = self.form_cls(update, instance=self.entry)\n\n form.save()\n\n actual = models.Entry.objects.get(pk=self.entry.pk)\n self.assertEquals(actual.title, update['title'])\n self.assertEquals(actual.content.raw, update['content'])\n self.assertIsNotNone(actual.published_timestamp)", "def test_make_form_hidden():", "def edit(self):\n\n pass", "def test_edit_boat(self):\n pass", "def test_show_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)", "def test_edit_has_populated_form(testapp, fill_the_db, login_testcase):\n response = testapp.get('/journal/1/edit-entry', status=200)\n title = response.html.find_all('input')[1]['value']\n body = response.html.form.textarea.contents[0]\n assert title == ENTRIES[0]['title']\n assert body == ENTRIES[0]['body']", "def edit(self, **kwargs):\n ...", "def test_editing_post(self):\n\n form_data = {\"meal-time\": \"2020-02-25 08:00:00\", \n \"meal-setting\": \"At home!\", \"TEB\": \"Some thoughts..\",\n \"hunger\": 2, \"fullness\": 3, \"meal-notes\": \"Some notes.\"}\n\n edit_post(1, \"/static/images/uploads/2.jpg\", form_data)\n\n post = Post.query.get(1)\n \n self.assertEqual(post.meal_setting, \"At home!\")\n self.assertEqual(post.satisfaction, None)\n self.assertNotEqual(post.fullness, 8)", "def test_inmate_edit_view(self):\n self.test_inmate_validation() # cheap way to get an inmate\n inmate = models.Inmate.objects.all()[0]\n c = Client()\n response = c.get('/lemur/inmate/edit/' + str(inmate.pk), follow=True)\n self.assertEqual(response.status_code, 200)" ]
[ "0.7102556", "0.6937645", "0.65472555", "0.6311472", "0.6263482", "0.61965406", "0.6167895", "0.615845", "0.6157788", "0.612634", "0.61004084", "0.60736597", "0.60716546", "0.606673", "0.6046549", "0.6044666", "0.60096306", "0.6009348", "0.60079074", "0.60064816", "0.5977993", "0.59617645", "0.59610575", "0.5959773", "0.59583724", "0.59374815", "0.59007394", "0.58937025", "0.58922625", "0.58706325" ]
0.7367693
0
Test submitting an edited offer.
def test_edit_offer(self, mock_find):
    mock_find.return_value = sample_offer

    result = self.client.get(f'/offers/{sample_offer_id}')
    self.assertEqual(result.status, '200 OK')
    self.assertIn(b'Description', result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_offers_edit(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)", "def test_submit_offer(self, mock_insert):\n result = self.client.post('offers_show', data=sample_form_data)\n\n # After submitting, should redirect to the offers_show page.\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)", "def test_edited(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO4\",\n due=datetime.date.today(), additional=\"\",\n )\n\n url, form = self._get_initial_form('todo_edit', todo.pk)\n form['title'] = \"Test TODO4 - new title\"\n form['completed'] = True\n form['additional'] = ''\n\n rv = self.client.post(reverse('todo_edit', args=[todo.pk]), form)\n assert rv.status_code == 302\n\n todo.refresh_from_db()\n\n assert todo.title == \"Test TODO4 - new title\"\n assert todo.completed is True", "def test_successful_article_edit(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['message'], \"Article has been successfully updated.\")", "def test_edit_boat(self):\n pass", "def test_offers_new(self):\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)", "def test_can_edit_ride_offer(self):\n response = self.app.put('/api/v1/users/rides/1',\n data=json.dumps(self.ride),\n content_type='application/json',\n headers=self.headers)\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.get_data().decode('utf-8'))\n self.assertEqual(response_data['start point'],'Juja')", "def test_expense_can_be_edited(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n rv = self.client().post(\n '/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(rv.status_code, 201)\n rv = self.client().put(\n '/expenses/1', headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"chargers\"\n })\n self.assertEqual(rv.status_code, 200)\n results = self.client().get('/expenses/1', headers=dict(Authorization=\"Bearer \" + access_token))\n res = json.loads(results.data)\n self.assertEqual('chargers', str(res['name']))", "def test_update_item_using_post(self):\n pass", "def test_editing_supplies_user(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'aaa'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyDetailsView.as_view()(request, pk=id)\n # normal user should get forbidden error\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)", "def test_edit(self):\n\n\t\titem_id = mock_item()[0]\n\n\t\titem_data = {'title': 'Item Two', 'author': 'Author Two',\n\t\t\t'location': 'Location Two'}\n\t\tmodels.edit(item_id, item_data)\n\n\t\titem = models.item(item_id)\n\n\t\tself.assertEqual(item['title'], 
item_data['title'])\n\t\tself.assertEqual(item['author'], item_data['author'])\n\t\tself.assertEqual(item['location'], item_data['location'])", "def test_edit_view(self):\n self.client.post(reverse('misago:admin:users:bans:new'), data={\n 'check_type': '0',\n 'banned_value': 'Admin',\n })\n\n test_ban = Ban.objects.get(banned_value='admin')\n form_link = reverse('misago:admin:users:bans:edit', kwargs={'pk': test_ban.pk})\n\n response = self.client.post(form_link, data={\n 'check_type': '1',\n 'banned_value': '[email protected]',\n 'user_message': 'Lorem ipsum dolor met',\n 'staff_message': 'Sit amet elit',\n 'expires_on': '',\n })\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '[email protected]')", "def test_editing_supplies_admin(self):\n id = self.testsupply.id\n request = self.factory.put(\n '/api/supplies/1/', {'name': '3d printer', 'state': 'broken'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyDetailsView.as_view()(request, pk=id)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Supply.objects.get(id=id).state, 'broken')", "def test_handle_edit(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"brS\")\n team.platform = \"web\"\n team_attach = [team.get_attachment()]\n team.platform = \"iOS\"\n team.display_name = \"b-s\"\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team edit brs \"\n \"--name brS \"\n \"--platform web\", user)\n expect = {'attachments': team_attach,\n 'text': 'Team edited: brs, '\n 'name: brS, '\n 'platform: web'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_once_with(team)", "def test_edit_answer(self):\n user = self.create_user()\n user_id = user[0] # answer author user id\n question_id = int(self.create_question()[0])\n # token should be encoded with the id of the answer author\n auth_token = user[1]\n new_answer = self.post_data(question_id, auth_token=auth_token).json\n answer_id = int(new_answer['answer_id'])\n headers = {\"Authorization\":\"Bearer {}\".format(auth_token)}\n path = \"/api/v2/questions/{}/answers/{}\".format(question_id,\n answer_id)\n data = {\"text\":\"edited answer\"}\n result = self.client.put(path,\n headers=headers,\n data=json.dumps(data),\n content_type='application/json')\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json['value'], data['text'])", "def test_sell_ticket_posted(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket info, should be redirected to / route\n self.type('#name_sell', \"t1\")\n self.type(\"#price_sell\", \"100\")\n self.type(\"#quantity_sell\", \"2\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n self.assert_element(\"#welcome-header\")\n # Assert that the valid error message is shown.\n self.assert_text(\"Hi test_frontend\", 
\"#welcome-header\")", "def test_aid_edition_view(client, contributor, aid_form_data):\n\n aid = AidFactory(name='First title', author=contributor)\n # Anonymous, no access\n form_url = reverse('aid_edit_view', args=[aid.slug])\n res = client.get(form_url)\n assert res.status_code == 302\n\n # Logged contributor, access granted\n client.force_login(contributor)\n res = client.get(form_url)\n assert res.status_code == 200\n\n aids = Aid.objects.filter(author=contributor)\n assert aids.count() == 1\n\n aid_form_data['name'] = 'New title'\n res = client.post(form_url, data=aid_form_data)\n assert res.status_code == 302\n assert aids.count() == 1\n assert aids[0].name == 'New title'\n assert aids[0].author == contributor", "def test_editing_comment(self):\n\n data = {\"comment\": \"Edited comment body.\"}\n result = self.client.post(\"/comment/1/edit.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Edited comment\", result.data)", "def test_update_submission_service(self):\n pass", "def test_edit_event(self):\n event = mongo.db.events.find_one()\n ids = event.get('_id')\n res = self.client.post('/edit-event/{}'.format(ids),\n follow_redirects=True, data=dict(\n event_type='Club Hosted Event',\n location='Letterkenny',\n date='20 September 2020',\n description='Test',\n organiser='Unicorn MCC'\n ))\n\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n # Check for updated event_type\n assert 'Club Hosted Event' in data", "def test_edit_alert_by_id(self):\n pass", "def test_plusrecords_on_edit(self):\n self.go200('minus_plus_upload')\n self.formfile('minus_plus_upload', 'file', AUDIO_FILE)\n self.submit200()\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.submit200()\n self.go200('minus_plus_upload')\n self.formfile('minus_plus_upload', 'file', AUDIO_FILE)\n self.submit200()\n self.assert_equal(MinusPlusRecord.objects.count(), 2)\n self.go200('minus_edit', [self.superuser, 1])\n self.fv('minus_upload', 'author', \"brams\")\n self.submit200()\n self.find('Плюс')\n self.assert_equal(MinusPlusRecord.objects.count(), 1)", "def test_resuableitem_submit_changerequest_public_owner_accept(self):\n\n original_reusableitem = make_reusable_item_public(self.reusableitem_1.id)\n\n # Create a second list for the owner\n create_toptenlist(self, 'user_1', '1_2')\n\n # And set one of its items to reference the reusable item\n # This allows the count of users to be checked\n reference_reusable_item(self, 'user_1', self.reusableitem_1.id, 'toptenlist_1_2', 4)\n\n # submit the change request\n data1 = submit_change_request_1(self, self.user_1)\n updated_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # editable values have been updated\n self.assertEqual(updated_reusableitem.name, data1['name'])\n self.assertEqual(updated_reusableitem.definition, data1['definition'])\n self.assertEqual(updated_reusableitem.link, data1['link'])\n\n # the name of all referencing top ten items should be updated\n toptenitems1 = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems1[0].id\n updated_user1_toptenitem1 = TopTenItem.objects.get(pk=toptenitem_1_id)\n self.assertEqual(updated_user1_toptenitem1.name, data1['name'])\n\n toptenitems2 = self.toptenlist_1_2.topTenItem.all()\n toptenitem_2_id = toptenitems2[4].id\n updated_user1_toptenitem2 = TopTenItem.objects.get(pk=toptenitem_2_id)\n self.assertEqual(updated_user1_toptenitem2.name, data1['name'])\n\n # history has been updated\n history_entry = 
updated_reusableitem.history[1]\n self.assertNotEqual(history_entry, None)\n self.assertEqual(history_entry['change_request_resolution'], 'accepted')\n self.assertNotEqual(history_entry['changed_request_resolved_at'], None)\n self.assertEqual(history_entry['changed_request_submitted_by_id'], self.user_1.id.__str__())\n self.assertEqual(history_entry['number_of_users'], 1)\n self.assertEqual(history_entry['change_request_votes_yes_count'], 1)\n self.assertEqual(history_entry['change_request_votes_no_count'], 0)\n\n self.assertEqual(history_entry['change_request']['name'], data1['name'])\n self.assertEqual(history_entry['change_request']['definition'], data1['definition'])\n self.assertEqual(history_entry['change_request']['link'], data1['link'])\n\n # there should not be a notification\n self.assertEqual(Notification.objects.count(), 0)\n\n # add a second reference to this reusable item, by a different user\n create_toptenlist(self, 'user_2', 2)\n reference_reusable_item(self, 'user_2', self.reusableitem_1.id, 'toptenlist_2', 0)\n\n # owner can propose a change request\n # it does not update immediately\n self.client.force_authenticate(user=self.user_1)\n data2 = {'name': 'Agatha Christie 2', 'definition': 'A writer 2', 'link': 'someurl2'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n updated_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # editable properties unchanged\n self.assertEqual(updated_reusableitem.name, data1['name'])\n self.assertEqual(updated_reusableitem.definition, data1['definition'])\n self.assertEqual(updated_reusableitem.link, data1['link'])\n\n # change request created\n self.assertEqual(updated_reusableitem.change_request['name'], data2['name'])\n self.assertEqual(updated_reusableitem.change_request['definition'], data2['definition'])\n self.assertEqual(updated_reusableitem.change_request['link'], data2['link'])\n\n # user 1 has voted for it\n self.assertEqual(updated_reusableitem.change_request_votes_no.count(), 0)\n self.assertEqual(updated_reusableitem.change_request_votes_yes.count(), 1)\n self.assertEqual(updated_reusableitem.change_request_votes_yes.first(), self.user_1)\n\n # User 2 should get a notification of the change request\n self.assertEqual(Notification.objects.count(), 1)\n\n notification = Notification.objects.first()\n self.assertEqual(notification.created_by, self.user_2)\n self.assertEqual(notification.context, 'reusableItem')\n self.assertEqual(notification.event, 'changeRequestCreated')\n self.assertEqual(notification.reusableItem, updated_reusableitem)\n\n # delete any notifications prior to the next step\n Notification.objects.all().delete()\n self.assertEqual(Notification.objects.count(), 0)\n\n # user 2 now votes for the change request\n self.client.force_authenticate(user=self.user_2)\n\n data3 = {'vote': 'yes'}\n response = self.client.patch(get_reusable_item_1_url(self), data3, format='json')\n\n updated_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # it should be resolved\n self.assertEqual(updated_reusableitem.change_request, None)\n self.assertEqual(updated_reusableitem.change_request_votes_no.count(), 0)\n self.assertEqual(updated_reusableitem.change_request_votes_yes.count(), 0)\n\n self.assertEqual(updated_reusableitem.name, data2['name'])\n self.assertEqual(updated_reusableitem.definition, data2['definition'])\n 
self.assertEqual(updated_reusableitem.link, data2['link'])\n\n # the name of all referencing top ten items should be updated\n toptenitems1 = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems1[0].id\n updated_user1_toptenitem1 = TopTenItem.objects.get(pk=toptenitem_1_id)\n self.assertEqual(updated_user1_toptenitem1.name, data2['name'])\n\n toptenitems2 = self.toptenlist_1_2.topTenItem.all()\n toptenitem_2_id = toptenitems2[4].id\n updated_user1_toptenitem2 = TopTenItem.objects.get(pk=toptenitem_2_id)\n self.assertEqual(updated_user1_toptenitem2.name, data2['name'])\n\n toptenitems3 = self.toptenlist_2.topTenItem.all()\n toptenitem_3_id = toptenitems3[0].id\n updated_user2_toptenitem1 = TopTenItem.objects.get(pk=toptenitem_3_id)\n self.assertEqual(updated_user2_toptenitem1.name, data2['name'])\n\n # history has been updated\n history_entry = updated_reusableitem.history[2]\n self.assertNotEqual(history_entry, None)\n self.assertEqual(history_entry['change_request_resolution'], 'accepted')\n self.assertNotEqual(history_entry['changed_request_resolved_at'], None)\n self.assertEqual(history_entry['changed_request_submitted_by_id'], self.user_1.id.__str__())\n self.assertEqual(history_entry['number_of_users'], 2)\n self.assertEqual(history_entry['change_request_votes_yes_count'], 2)\n self.assertEqual(history_entry['change_request_votes_no_count'], 0)\n\n self.assertEqual(history_entry['change_request']['name'], data2['name'])\n self.assertEqual(history_entry['change_request']['definition'], data2['definition'])\n self.assertEqual(history_entry['change_request']['link'], data2['link'])\n\n # User 1 and user 2 should each get a notification of the change request acceptance\n self.assertEqual(Notification.objects.count(), 2)\n\n notification1 = Notification.objects.get(created_by=self.user_1)\n self.assertEqual(notification1.context, 'reusableItem')\n self.assertEqual(notification1.event, 'changeRequestAccepted')\n self.assertEqual(notification1.reusableItem, updated_reusableitem)\n\n notification2 = Notification.objects.get(created_by=self.user_2)\n self.assertEqual(notification2.context, 'reusableItem')\n self.assertEqual(notification2.event, 'changeRequestAccepted')\n self.assertEqual(notification2.reusableItem, updated_reusableitem)", "def test_update_review_modify(self):\n # Setup\n request_url = reverse(\n \"update-modify\",\n host=PUBLISH_HOST,\n kwargs={\n \"pk\": self.dataset_revision.dataset_id,\n \"pk1\": self.dataset_unpublished.organisation_id,\n },\n )\n # Test\n response = self.client.get(request_url)\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"publish/feed_form.html\")\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.UPLOAD_STEP\n )", "def test_edit(self):\n # Test using the Trovebox class\n html = self.client.photo.edit(self.photos[0])\n self.assertIn(\"<form\", html.lower())\n\n # And the Photo object directly\n html = self.photos[0].edit()\n self.assertIn(\"<form\", html.lower())", "def test_edit_mdt(self):\n response = self.client.get(reverse('edit-mdt', args=[self.sample_type, self.mdt.id]))\n self.assertContains(response, self.proband.gel_id)\n self.assertEqual(response.status_code, 200)", "def test_sync_biz_to_sugar_offer(self):\n LOG.debug('test_sync_biz_to_sugar_offer')\n #business = Business.objects.get(id=114)\n consumer = Consumer.objects.get(id=300)\n consumer.first_name = 'Danielle'\n consumer.last_name = 'Dongo'\n consumer.save()\n offer = 
Offer.objects.get(id=300)\n offer.create_datetime = datetime.datetime.now()\n offer.save()\n sync_business_to_sugar(offer=offer, sugar=self.sugar)\n module = \"Accounts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n self.assertTrue(sugar_list[0]['id'] != -1)\n self.assertEquals(sugar_list[0]['email1'], \n offer.business.advertiser.email)", "def test_edit_event(self):\n self.login()\n # Create event\n fakeName = fake.text()[0:100]\n date = datetime.now()\n self.create_event(fakeName, date)\n time.sleep(2)\n\n self.driver.get(self.live_server_url)\n self.assertEqual(self.live_server_url + \"/\",\n self.driver.current_url, \"Link redirects to other routes.\")\n time.sleep(2)\n\n # Get rendered values\n linkButton = self.driver.find_element_by_id(\"dropdownMenuLink\")\n linkButton.click()\n time.sleep(2)\n\n linkButton2 = self.driver.find_element_by_id(\"edit-event\")\n linkButton2.click()\n self.driver.implicitly_wait(15)\n\n # Form data\n newFakeName = fake.text()[0:100]\n newDueDate = datetime.now()\n\n # Set Event fields\n eventNameField = self.driver.find_element_by_id(\"id_name\")\n dueDateField = self.driver.find_element_by_id(\"id_date\")\n\n # Clear inputs\n eventNameField.clear()\n dueDateField.clear()\n\n self.driver.implicitly_wait(15)\n\n # Fill in event details\n eventNameField.send_keys(newFakeName)\n dueDateField.send_keys(newDueDate.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n # Submit Form\n time.sleep(10)\n self.driver.find_element_by_css_selector(\n 'input[type=\"submit\"]').click()\n\n # Verify that the event was saved\n self.assertEqual(self.live_server_url + \"/\", self.driver.current_url)\n\n # Verify that the event was edited\n eventName = self.driver.find_element_by_id(\n \"dropdownMenuLink\").get_attribute(\"innerText\").strip()\n self.assertEqual(eventName, newFakeName.strip())\n\n # Clean Test database\n Event.objects.all().delete()", "def test_editEvent(self):\n event_a = Event.objects.create(title=\"Christmas meal\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n client = APIClient()\n update_data = {\"event_owner\": self.person_a.pk, \"title\": \"Christmas meal\", \"start\":\n datetime.strptime(\"2020-12-07 12:00\", \"%Y-%m-%d %H:%M\"),\n \"end\": datetime.strptime(\"2020-12-07 16:00\", \"%Y-%m-%d %H:%M\"), \"duration\": timedelta(hours=4),\n \"invites\": [self.comms_grp.pk], \"recurrence_interval\": 0, \"description\": \"Christmas party yahoo\",\n \"website_publish\": False}\n resp = client.put('/api/events/christmas-meal', data=update_data, format='json')\n self.assertEqual(resp.status_code, 200)\n event_check = Event.objects.get(title=\"Christmas meal\")\n self.assertEqual(event_check.description, \"Christmas party yahoo\")", "def test_form_editing(self):\n update = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': True,\n }\n\n form = self.form_cls(update, instance=self.entry)\n\n form.save()\n\n actual = models.Entry.objects.get(pk=self.entry.pk)\n self.assertEquals(actual.title, update['title'])\n self.assertEquals(actual.content.raw, update['content'])\n self.assertIsNotNone(actual.published_timestamp)" ]
[ "0.7510725", "0.698181", "0.62977237", "0.62476325", "0.6218222", "0.61978376", "0.6181655", "0.6157984", "0.61287814", "0.6128666", "0.61075187", "0.6098314", "0.60974056", "0.6076452", "0.60475904", "0.6021605", "0.5991045", "0.59832805", "0.5976426", "0.59733415", "0.5971182", "0.59689915", "0.59660465", "0.594957", "0.5925188", "0.5913594", "0.59088135", "0.58999634", "0.5892607", "0.58851624" ]
0.7380167
1
Test deletion of an offer.
def test_offers_delete(self, mock_delete):
    form_data = {'_method': 'DELETE'}
    result = self.client.post(f'/offers/{sample_offer_id}/delete', data=form_data)
    self.assertEqual(result.status, '302 FOUND')
    mock_delete.assert_called_with({'_id': sample_offer_id})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_item_using_delete(self):\n pass", "def test_delete(self):\n pass", "def test_delete_case(self):\n pass", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_delete_success(self, mock_delete):\n\n self.policies.delete(id=self.policy_single_response['policy']['id'])\n\n mock_delete.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(\n self.policy_single_response['policy']['id']\n ),\n headers=self.policies.headers\n )", "def test_delete_run(self):\n pass", "def test_delete_record(self):\n pass", "def test_delete_boat(self):\n pass", "def test_unsubscribe_offer(self):\n pass", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_a_user_can_delete_dislike(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n self.client.post('/api/articles/{}/dislike/'.format(slug),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n dislike = self.client.post('/api/articles/{}/dislike/'.format(slug),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n self.assertEqual(dislike.status_code, 200)", "def test_meeting_poll_delete(self):\n pass", "def test_delete_nveto_pmt_item(self):\n pass", "def test_delete_alert_by_id(self):\n pass", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(self.feature_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertTrue(revised_feature.deleted)", "def test_vault_delete_vault_item(self):\n pass", "def test_delete(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # delete\n self.delete(id=task.id)\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertIsNone(task)", "def test_deletehardwares_item(self):\n pass", "def delete(self):\n request = self.request\n raise_operation_error(\n request,\n \"Can't {} bid in Price Quotation tender\".format(\n 
OPERATIONS.get(request.method),\n ),\n )", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)", "def test_alarm_view_delete(self):\n # delete event\n request = self.factory.post('/module/alarm/del/1/', follow=True)\n request.user = self.user\n request.session = {}\n response = alarm_del(request, 1)\n self.assertEqual(response.status_code, 302)\n\n request = self.factory.post('/module/alarm/del/', {'select': '1'})\n request.user = self.user\n request.session = {}\n response = alarm_del(request, 0)\n self.assertEqual(response.status_code, 302)", "def test_delete_meal(self):\n with self.client:\n response = self.delete_meal()\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Meal deleted succesfully\", data.get('message'))", "def test_delete_subscription(self):\n pass", "def test_a_user_can_delete_like(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n self.client.post('/api/articles/{}/like/'.format(slug),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n like = self.client.post('/api/articles/{}/like/'.format(slug),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n self.assertEqual(like.status_code, 200)", "def testDelete(self):\n response = requests.delete(url=self.url)\n headers = response.headers\n json_data = response.json()\n\n self.assertTrue(self.place == storage.get(Place, self.place_id))\n self.assertTrue(self.user == storage.get(User, self.user_id))\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertTrue(self.state == storage.get(State, self.state_id))\n self.assertEqual(response.status_code, 200, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertEqual(len(json_data), 0)\n storage.reload()\n self.assertIsNone(storage.get(Place, self.place_id))", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)" ]
[ "0.68176514", "0.6788493", "0.67163765", "0.6692377", "0.65834457", "0.65589345", "0.65540034", "0.6534039", "0.64845026", "0.64759994", "0.644285", "0.6428234", "0.64046663", "0.6401266", "0.64008033", "0.6393146", "0.6370907", "0.63591313", "0.63482857", "0.63346833", "0.63280064", "0.63089365", "0.63083416", "0.63070416", "0.63052976", "0.62998444", "0.62980205", "0.6293451", "0.6292616", "0.6291715" ]
0.77197415
0
A helper routine for selectionSort which finds the index of the biggest value in data at the mark index or greater.
def _findMaxIndex(data, mark):
    # assume the maximum value is at initial mark position
    maxIndex = mark
    # loop over the remaining positions greater than the mark
    for mark in range(mark+1, len(data)):
        # if a bigger value is found, record its index
        if data[mark][1][2] > data[maxIndex][1][2]:
            maxIndex = mark
    return maxIndex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_max(data):\n index = 0\n res = data[index]\n for i in range(1, len(data)):\n if data[i] > res:\n res = float(data[i])\n index = i\n else:\n break\n return res, index", "def index_largest(seq):\n assert len(seq) > 0\n x, greatest, index = len(seq), seq[0], 0\n for elem in range(1, x):\n if seq[elem] > greatest:\n greatest = seq[elem]\n index = elem\n return index", "def indexOfMax(list):\n max = -np.Infinity\n index = 0\n i = 0\n for value in list:\n if value > max:\n max = value\n index = i\n i += 1\n return index", "def maximo(arr):\n maxVal = float('-inf')\n maxIdx = -1\n\n for i in range(len(arr)):\n if arr[i] > maxVal:\n maxVal = arr[i]\n maxIdx = i\n\n return maxVal, maxIdx", "def get_max_index(a):\n return a.argmax()", "def get_max_index(a):\n return a.argmax()", "def get_max_index(a):\n return a.argmax()", "def pick_largest(self, cut_off):\r\n for i in range(self.dimension):\r\n m = self.masked[int(self.rank_yx(self.rank[i])[0]) # locating the corresponding mark array\r\n ,int(self.rank_yx(self.rank[i])[1])]\r\n if m * self.image_data[i] == self.image_data[i]:\r\n if self.image_data[i] <= cut_off:\r\n print(\"Surveying completed\")\r\n return -1,-1 # returns -1,-1 if scan is completed\r\n else:\r\n return self.image_data[i], np.array(self.rank[i])", "def get_youngest_student(students):\n youngest_index = 0 \n youngest = students[0][3]\n for counter, row in enumerate(students[1:], 1):\n if int(row[3]) > int(youngest):\n youngest = students[counter][3]\n youngest_index = counter \n return students[youngest_index]", "def get_largest_index(student_list, length):\n largest_index = 0\n for i in range(length):\n if student_list[i] > student_list[largest_index]:\n largest_index = i\n return largest_index", "def findmaxidx(datasets, target='atom_label'):\n\n if target == 'atom_label':\n return _findmaxidx(datasets, 0)\n elif target == 'wle_label':\n return _findmaxidx(datasets, 2)", "def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)", "def max_pos(self, start, end, header) -> int:", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def findMinFrom(lst, mark):\n iMin = mark\n for i in range(mark + 1, len(lst)):\n if lst[i] < lst[iMin]:\n iMin = i\n return iMin", "def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data", "def find_max(list):\n return find_value_at(list, 0)", "def largest_plr(x, idx):\n l = (idx << 1) + 1\n r = (idx + 1) << 1\n largest = jnp.where(x[idx] < x[l], l, idx)\n largest = jnp.where(x[largest] < x[r], r, largest)\n return largest", "def largest_item(list):\n pass", "def find_max_in_array(arr, k):\r\n print(\" Amazon interview question\")\r\n arr[:] = sorted(arr)\r\n return ((arr[len(arr)-k]))", "def get_max_interest(self):\n max_int = max(self.table[\"interest\"])\n print(max_int)\n for index in self.table[\"index\"]:\n if self.table[\"interest\"][index] == max_int:\n return index", "def max_info(lst):\n k = []\n maxm = -1\n for i in range(len(lst)):\n if lst[i] == maxm:\n k.append(i)\n if lst[i] > maxm:\n maxm = lst[i]\n k = [i]\n return k", "def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index", "def find_gt_index(a, x):\n i = bisect_right(a, x)\n if 
i < len(a):\n return i\n raise ValueError", "def selection_sort_max_version(arr):\n # No need to sort\n if arr is None:\n return arr\n\n n = len(arr)\n if n <= 1:\n return arr\n\n # i - range in order\n # j - range out of order\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n\n # select max element in range[0, j]\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n\n arr[i], arr[max_index] = arr[max_index], arr[i]\n\n return arr", "def max_index_of_smaller_number(list, number):\n for i, element in enumerate(list):\n if element >= number:\n return i - 1", "def find_max_index(A: List[int], x: int) -> int:\n max_index = -1\n start = 0\n end = len(A) - 1\n\n while start <= end:\n mid = start + (end - start) // 2\n\n if A[mid] == x:\n max_index = mid\n start = mid + 1\n elif x < A[mid]:\n end = mid - 1\n else:\n start = mid + 1\n\n return max_index", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def imax(self):\n return self.elem.index(max(self))" ]
[ "0.6781426", "0.67771745", "0.64360774", "0.63961357", "0.6354491", "0.6354491", "0.6354491", "0.6315467", "0.6232908", "0.61545926", "0.6070463", "0.60310376", "0.5987817", "0.5986606", "0.59847385", "0.59797347", "0.5973072", "0.596987", "0.5966308", "0.5958042", "0.5954138", "0.59415674", "0.5939533", "0.59227484", "0.5920404", "0.5890787", "0.5880467", "0.5878741", "0.5873084", "0.5869803" ]
0.85851187
0
Perform an inplace selection sort of data.
def selectionSort(data):
    for mark in range(len(data)-1):
        maxIndex = _findMaxIndex(data, mark)
        # swap the element at marker with the min index
        data[mark], data[maxIndex] = data[maxIndex], data[mark]
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selection_sort(self, data):\n for i in range(len(data)-1, 0, -1):\n i_max = 0\n for j in range(1, i+1):\n if data[j] > data[i_max]:\n i_max = j\n tmp = data[i]\n data[i] = data[i_max]\n data[i_max] = tmp\n print \"pass\", i, data", "def selection_sort(unsorted):\n n = len(unsorted)\n _sorted = []\n for _ in range(0, n):\n val = min(unsorted)\n _sorted.append(val)\n unsorted.remove(val)\n del unsorted", "def oldsortslice(self):\n ...", "def selection_sort(l):\n\tfor i in range(len(l)):\n\t\tmin_idx = i\n\n\t\t# select minimun value in remained array\n\t\tfor j in range(i + 1, len(l)):\n\t\t\tif l[min_idx] > l[j]:\n\t\t\t\tmin_idx = j\n\t\t__swap(l, i, min_idx)", "def selectionsort(array):\n for i in range(len(array)):\n min_index = i\n for j in range(i, len(array)):\n if array[j] < array[min_index]:\n min_index = j\n if i != min_index:\n array[i], array[min_index] = array[min_index], array[i]\n return array", "def _selection_sort(unsorted, n):\n for i in range(0, n):\n min_ = min(unsorted[i:])\n min_index = unsorted.index(min_)\n swap = unsorted[i]\n unsorted[i] = min_\n unsorted[min_index] = swap", "def selection_sort(array):\n # Your code\n for i in range(len(array) - 1):\n min_index = i\n for j in range(i + 1, len(array)):\n if array[j] < array[min_index]:\n min_index = j\n array[min_index], array[i] = array[i], array[min_index]\n return array", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def selection_sort(l):\n walk = 0\n while walk < len(l):\n i = walk\n while i < len(l):\n if l[i] < l[walk]:\n # swap i and walk\n tmp = l[walk]\n l[walk] = l[i]\n l[i] = tmp\n i += 1\n walk += 1\n return", "def selection_sort(cls, student_list):\n end_index = len(student_list) - 1\n for i in range(len(student_list) - 1):\n large_index = cls.get_largest_index(student_list, end_index + 1)\n student_list[large_index], student_list[end_index] = \\\n student_list[end_index], student_list[large_index]\n end_index -= 1", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def selection_sort(items):\n # Repeat until all items are in sorted order\n # Find minimum item in unsorted items\n # Swap it with first unsorted item\n # TODO: Running time: ??? Why and under what conditions?\n # TODO: Memory usage: ??? 
Why and under what conditions?\"\"\"\n # pdb.set_trace()\n while not is_sorted(items):\n for i in range(len(items) - 1):\n # setting the minimum to start with\n min = i\n # Start looping from the current index i\n for j in range(i + 1, len(items)):\n # if j is less than our current minimum\n if items[j] < items[min]:\n # set j to our minimum\n min = j\n # Once loop is done set i to be our minimum\n items[i], items[min] = items[min], items[i]\n return items", "def sort_by_default(self):\n self.data.sort()", "def SelectionSort(ulist):\n for i in range(len(ulist)):\n mini = ulist[i]\n mpos = i;\n for j in range(i, len(ulist)):\n if mini < ulist[j]:\n mini = ulist[j]\n mpos = j\n ulist[i], ulist[mpos] = ulist[mpos], ulist[j]", "def selection_sort(items):\n # TODO: Repeat until all items are in sorted order\n # TODO: Find minimum item in unsorted items\n # TODO: Swap it with first unsorted item\n for x in range(len(items)):\n smallest_index = x\n if x!=len(items)-1:\n for y in range(x+1, len(items)):\n if items[y] < items[smallest_index]:\n smallest_index = y\n temp = items[x]\n items[x] = items[smallest_index]\n items[smallest_index] = temp", "def selection_sort(array):\n # traverse the array\n for i in xrange(len(array)):\n # initialize min index\n min_index = i\n # find the least element in unsorted list and update min index\n for j in xrange(i+1, len(array)):\n if array[j] < array[min_index]:\n min_index = j\n # swap current element with min index value\n array[i], array[min_index] = array[min_index], array[i]\n # return array\n return array", "def selection_sort(input, compare=lt):\n list_len = len(input)\n for i in xrange(list_len - 1):\n # the index where the target, based on compare, resides \n target_index = i\n for j in xrange(i+1, list_len):\n if not compare(input[target_index], input[j]):\n target_index = j\n # swap the target to the end of the sorted list\n input[i], input[target_index] = input[target_index], input[i]\n return input", "def selection_sort(items):\n # Repeat until all items are in sorted order\n # Find minimum item in unsorted items\n # Swap it with first unsorted item\n current = 0\n minimum = 0\n first = 0\n while not is_sorted(items):\n if items[current] < items[minimum]:\n minimum = current\n\n elif current == len(items) - 1:\n items[minimum], items[first] = items[first], items[minimum]\n first += 1\n current = first\n minimum = first\n \n else:\n current += 1", "def selection_sort(L):\n for i in range(len(L)):\n # Find the index of the smellest item in L[i:] and swap that item with the item at index i.\n\n index_of_smallest = get_index_of_smallest(L,i)\n L[index_of_smallest], L[i] = L[i], L[index_of_smallest]", "def selection_sort(array):\n n = len(array)\n result = array.copy()\n for i in range(n - 1):\n # Find next-smallest value\n smallest = i\n for j in range(i + 1, n):\n if compare(result[j], result[smallest]) < 0:\n smallest = j\n # Swap next-smallest value into position\n if i != smallest:\n result[i], result[smallest] = result[smallest], result[i]\n\n return result", "def selection_sort(deck):\n for firstIndex in range(len(deck)):\n min_index = simple_index_of_min(deck, firstIndex)\n deck = swap(deck, firstIndex, min_index)\n return deck", "def selSort(L):\n\tfor i in range(len(L) - 1):\n\t\tminIndx = i\n\t\tminVal= L[i]\n\t\tj = i + 1\n\t\twhile j < len(L):\n\t\t\tif minVal > L[j]:\n\t\t\tminIndx = j\n\t\t\tminVal= L[j]\n\t\t\tj += 1\n\t\ttemp = L[i]\n\t\tL[i] = L[minIndx]\n\t\tL[minIndx] = temp", "def selection_sort(seq):\n length = len(seq)\n\n for i 
in range(length-1, 0, -1):\n\n max_pos = 0 # Position of max value\n\n for j in range(1, i+1):\n if seq[j] > seq[max_pos]:\n max_pos = j\n\n tmp = seq[i]\n seq[i] = seq[max_pos]\n seq[max_pos] = tmp\n\n return seq", "def selection_sort(lista):\n for index in range(0, len(lista)):\n min_index = index\n\n for right in range(index + 1, len(lista)):\n if lista[right] < lista[min_index]:\n min_index = right\n\n lista[index], lista[min_index] = lista[min_index], lista[index]", "def selection_sort(arr: List[int]):\n for i in range(len(arr) - 1):\n j = find_smallest(i, arr)\n swap_elements(i, j, arr)\n\n return arr", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def _sort(self):\n self.population.sort()\n self.population.reverse()", "def sort():\n return -1", "def selection_sort(unsorted_list):\n if len(unsorted_list) <= 1:\n return unsorted_list\n for index in range(len(unsorted_list)):\n lowest_number = index\n for i in range(index, len(unsorted_list)):\n if unsorted_list[lowest_number] > unsorted_list[i]:\n lowest_number = i\n unsorted_list[index], unsorted_list[lowest_number] = unsorted_list[lowest_number], unsorted_list[index]\n return unsorted_list", "def sort(self):\r\n return self.sort_targets([self])" ]
[ "0.77006024", "0.67082053", "0.65607554", "0.6547946", "0.6539799", "0.6534672", "0.6529109", "0.64570767", "0.6438951", "0.6392037", "0.63764393", "0.63130563", "0.6299781", "0.6295771", "0.6284434", "0.6272947", "0.62266135", "0.62155014", "0.6215076", "0.6189748", "0.6166912", "0.61367834", "0.6132635", "0.6129301", "0.6124682", "0.6109628", "0.6079951", "0.60730547", "0.6055195", "0.60549337" ]
0.7535001
1
Perform crossover from p1, p2 into c1, c2. Note that we do not actually perform crossover. We build a model from all the parents. Then we sample the model to produce the children. This function is called self.pop_size / 2 times.
def crossover (self, p1, p2, p_pop, c1, c2, c_pop) : assert self.crossover_count < self.pop_size assert self.get_iteration () == self.last_gen self.parents.append (p1) self.parents.append (p2) self.crossover_count += 2 if self.crossover_count == self.pop_size : assert (self.get_iteration () == self.last_gen) print (self.get_iteration ()) sys.stdout.flush () self.build_model (p_pop) self.sample_model (c1, c2, c_pop) self.crossover_count = 0 self.parents = [] self.children = {} self.last_gen += 1 self.clear_cache ()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))", "def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population", "def cross(self):\n\n for i in range(self.pop_num): # Put in the first pop_num elements of the \"Parents and Sons\" array our entire input population.\n self.par_and_sons[i].A=self.population[i].A.copy()\n\n random.shuffle(self.population) # Shuffle population.\n\n tt=0 # The counter that is needed to implement a non-trivial crossing.\n for s in range(0,self.pop_num,2): # From 0 to pop_num with step 2. That is. 
here we take pop_num / 2 pairs of parents.\n self.mother.A=self.population[tt+int(self.pop_num/2)].A # Let the last pop_num / 2 individuals of our population be our mothers.\n self.father.A=self.population[tt].A # And let first pop_num / 2 individuals of our population be dads.\n \n tt=tt+1 \n ran=random.random()\n\n for n in range(self.length): # Crossover.\n if random.random()>0.5:\n self.son1.A[n] = self.father.A[n]\n self.son2.A[self.length-1-n] = self.father.A[n]\n self.son3.A[n] = self.mother.A[n]\n self.son4.A[self.length-1-n] = self.mother.A[n]\n else:\n self.son1.A[n] = self.mother.A[n]\n self.son2.A[self.length-1-n] = self.mother.A[n]\n self.son3.A[n] = self.father.A[n]\n self.son4.A[self.length-1-n] = self.father.A[n]\n\n self.par_and_sons[self.pop_num+2*s].A = self.son1.A.copy()\n self.par_and_sons[self.pop_num+2*s+1].A = self.son2.A.copy()\n self.par_and_sons[self.pop_num+2*s+2].A = self.son3.A.copy()\n self.par_and_sons[self.pop_num+2*s+3].A = self.son4.A.copy()", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors = np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = 
parent2.delta\n\n return child", "def doCrossover(parentPop, parSize, rosterSize):\n\n firstPar = random.randint(0, parSize - 1)\n secondPar = random.randint(0, parSize - 1)\n while secondPar == firstPar:\n secondPar = random.randint(0, parSize - 1)\n\n crossOverPt = random.randint(1, rosterSize - 2) # random num between second and second-to-last entry\n\n # debugging code\n # for i in range(rosterSize):\n # parentPop[firstPar].roster[i] = 2*i\n # parentPop[secondPar].roster[i] = 2*i + 1\n\n # first parent mapping\n chromosome = [parentPop[firstPar].roster[i] for i in range(crossOverPt)]\n\n # second parent mapping\n remainingLoops = rosterSize - len(chromosome)\n for i in range(remainingLoops):\n chromosome.append(parentPop[secondPar].roster[crossOverPt + i])\n return chromosome", "def segmented_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs segmented crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n swap = False\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if swap is False:\n if random_number <= self.swap_start_prob:\n swap = True\n else:\n swap = False\n elif swap is True:\n if random_number <= self.swap_stop_prob:\n swap = False\n else:\n swap = True\n\n if swap is True:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict", "def inversion_crossover(self, pop):\n children, tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) 
!= 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)", "def crossover(new_pop, k):\n shuffle(new_pop)\n for i in range(len(new_pop) // 2):\n points = random.sample(range(1, len(new_pop[i])), k)\n points.sort()\n for fold in range(k):\n x = points[fold]\n tmp = new_pop[2 * i][:x].copy()\n new_pop[2 * i][:x], new_pop[2 * i + 1][:x] = new_pop[2 * i +\n 1][:x], tmp\n return new_pop", "def crossOver(self):\n # copy all the chromosomes from the current generation to a regular python list\n # start with an empty list\n lstChromosomes = []\n # loop through all the items in the queue\n while not self.generation.empty():\n # take a chromosome off the queue\n chromosome = self.generation.get()\n # append the chromosome to the list\n lstChromosomes.append(chromosome)\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # cross-over all chromosomes in turn - start with the beginning of the list\n for chrom1Index in range(0, len(lstChromosomes)-1):\n # cross-over with all chromosomes that come after it\n for chrom2Index in range(chrom1Index, len(lstChromosomes)):\n # get the chromosomes we are crossing over\n chrom1 = lstChromosomes[chrom1Index]\n chrom2 = lstChromosomes[chrom2Index]\n # perform the cross-over operation\n xOver = chrom1.crossOver(chrom2)\n # create two new chromosome objects\n newChrom1 = self.chromosomeClass()\n newChrom2 = self.chromosomeClass()\n # set their genes to the values created by crossover operation\n newChrom1.genes = xOver[0]\n newChrom2.genes = xOver[1]\n # save the new chromosomes we just created\n newGeneration.put(newChrom1)\n newGeneration.put(newChrom2)\n # save all the original chromosomes\n for chromosome in lstChromosomes:\n newGeneration.put(chromosome)\n # keep track of all the chromosomes we create\n lstChromosomes = []\n # keep track of how many we are keeping\n chromosomesKept = 0\n # as long as we haven't added more chromosomes than the population is 
supposed to have\n # and we have more chromosomes to add...\n while chromosomesKept < self.populationSize and not newGeneration.empty():\n # take a chromosome off the new generation queue\n newChromosome = newGeneration.get()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1\n # as long as we haven't added more chromosomes than the population is supposed to have, create\n # random chromosomes\n while chromosomesKept < self.populationSize:\n # create a random chromosome\n newChromosome = self.chromosomeClass()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1", "def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population", "def uniform_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs uniform crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if random_number <= self.crossover_prob:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_node_attributes = 
copy.deepcopy(mate_1.nodes()[node])\n mate_2_node_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_node_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_node_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict", "def dynamic_crossover(nn1, nn2):\n # Lists for respective weights\n nn1_weights = get_weights(nn1.layers)\n nn2_weights = get_weights(nn2.layers)\n child_weights = []\n\n # Iterate through all weights from all layers for crossover\n for index, _ in enumerate(nn1_weights):\n # Get single point to split the matrix in parents based on # of cols\n coulmns = np.shape(nn1_weights[index])[1]-1\n split = random.randint(0, coulmns)\n # Iterate through after a single point and set the remaing cols to nn_2\n for j in range(split, coulmns):\n nn1_weights[index][:, j] = nn2_weights[index][:, j]\n\n # After crossover add weights to child\n child_weights.append(nn1_weights[index])\n\n # Add a chance for mutation\n mutation(child_weights)\n\n # Create and return child object\n return NeuralNetwork(child_weights)", "def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def crossover(self, parents: ChromList) -> ChromList:\n raise NotImplementedError", "def crossover(self, parent_1, parent_2):\r\n start = randrange(1, NUM_OF_GENETIC_UNITS + 1)\r\n end = randrange(1, NUM_OF_GENETIC_UNITS + 1)\r\n if end < start:\r\n start, end = end, start\r\n if start == end:\r\n end = NUM_OF_GENETIC_UNITS + 1\r\n if randrange(1, 3) == 1:\r\n parent_a = parent_1\r\n parent_b = parent_2\r\n else:\r\n parent_a = parent_2\r\n parent_b = parent_1\r\n crossover_of_genetic_units = {genetic_unit_num: parent_a.genetic_units[genetic_unit_num] for genetic_unit_num\r\n in parent_a.genetic_units}\r\n for genetic_unit_num in range(start, end):\r\n crossover_of_genetic_units[genetic_unit_num] = parent_b.genetic_units[genetic_unit_num]\r\n return crossover_of_genetic_units", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n 
self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)", "def evolve_generation(pop, probs, best_member, p_c, p_m):\n if best_member is None:\n new_pop = []\n else:\n new_pop = [best_member]\n while len(new_pop) < len(pop):\n NN1, NN2 = np.random.choice(pop, size=2, p=probs)\n new_pop.append(crossover(NN1, NN2, p_c, p_m))\n return new_pop", "def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos", "def cross_over(parent1, parent2):\n rnd_idx = np.random.choice(len(parent1))\n child = parent1[:rnd_idx] + parent2[rnd_idx:]\n return child", "def crossover(parent1: Individual, parent2: Individual, root_individual: RootIndividual,\n **kwargs) -> Tuple[Individual, Individual]:\n sieve = np.random.randint(2, size=len(parent1.params)) # Array of 0's and 1's\n not_sieve = sieve ^ 1 # Complement of sieve\n\n child1 = Individual(list(parent1.params * sieve + parent2.params * not_sieve), root_individual=root_individual)\n child2 = Individual(list(parent1.params * not_sieve + parent2.params * sieve), root_individual=root_individual)\n\n return child1, child2", "def crossover(population, kw=None, **kwargs):\n future_population = []\n while len(future_population) < len(population):\n p1, p2 = random.choice(population)['notes'], random.choice(population)['notes']\n split = random.randint(1, len(p1) - 1)\n map(future_population.append, [p1[:split] + p2[split:], p2[:split] + p1[split:]])\n return future_population", "def crossover(p1, p2, gamma=0.1):\n c1 = p1.deepcopy()\n c2 = p2.deepcopy()\n alpha = np.random.uniform(0, gamma, 1)\n c1.position = alpha * p1.position + (1 - alpha) * p2.position\n c2.position = alpha * p2.position + (1 - alpha) * p1.position\n return c1, c2", "def crossover(parent1, parent2, crossover_rate):\n if random.random() < crossover_rate:\n crossover = int(random.random() * len(parent1.sequence))\n chromo1 = \"{}{}\".format(\n parent1.sequence[:crossover],\n parent2.sequence[crossover:])\n chromo2 = \"{}{}\".format(\n parent2.sequence[:crossover],\n parent1.sequence[crossover:])\n return Chromosome(sequence=chromo1), Chromosome(sequence=chromo2)\n else:\n return parent1, parent2", "def cross_over(ind1, ind2):\n \n network1 = ind1.network\n network2 = ind2.network\n \n size = min(len(network1.index), len(network2.index))\n cx = random.randint(1, size - 1)\n \n temp = network1.copy()\n temp.iloc[:cx,:cx] = network2.iloc[:cx,:cx]\n network2.iloc[:cx,:cx] = network1.iloc[:cx,:cx]\n network1 = temp \n \n ind1.network = network1\n ind2.network = network2\n ind1.age = 1\n ind2.age = 1\n \n return ind1, ind2", "def cross_curr2pbest1(pop, ic, f, cr, rng, p_num, archive, arc_ind_cnt, task, **_kwargs):\n # Note: the population passed in the argument must be sorted by fitness!\n x_pbest = rng.integers(p_num)\n # a random individual is selected from the best p_num individuals of the population rng.integers\n p = [1 / (len(pop) - 1.0) if i != ic else 0 for i in range(len(pop))]\n r1 = rng.choice(len(pop), p=p) # a random individual != to the current individual is 
selected from the population\n p = [1 / (len(pop) + arc_ind_cnt - 2.0) if i != ic and i != r1 else 0 for i in range(len(pop) + arc_ind_cnt)]\n r2 = rng.choice(len(pop) + arc_ind_cnt, p=p)\n # a second random individual != to the current individual and r1 is selected from the population U archive\n j = rng.integers(task.dimension)\n if r2 >= len(pop):\n r2 -= len(pop)\n v = [pop[ic][i] + f * (pop[x_pbest][i] - pop[ic][i]) + f * (pop[r1][i] - archive[r2][i])\n if rng.random() < cr or i == j else pop[ic][i] for i in range(task.dimension)]\n return parent_medium(np.asarray(v), pop[ic].x, task.lower, task.upper)\n # the mutant vector is repaired if needed\n\n else:\n v = [pop[ic][i] + f * (pop[x_pbest][i] - pop[ic][i]) + f * (pop[r1][i] - pop[r2][i])\n if rng.random() < cr or i == j else pop[ic][i] for i in range(task.dimension)]\n return parent_medium(np.asarray(v), pop[ic].x, task.lower, task.upper)\n # the mutant vector is repaired if needed", "def crossover(self, candidates):\n xver = []\n for par1, par2 in candidates:\n n = min(par1.enc_path.shape[0], par2.enc_path.shape[0])\n x_point = np.random.randint(0, n - 1)\n child = Path()\n child.enc_path = np.vstack((par1.enc_path[0:x_point], par2.enc_path[x_point:]))\n xver.append(child)\n return xver", "def crossover(self, parents):\n\n randomCategory = random.sample(list(ga_.Category), 1)[0]\n randomParent1 = random.sample(parents, 1)[0]\n randomParent2 = None\n for parent in parents:\n if parent != randomParent1:\n randomParent2 = parent\n \n\n # put randomCategory from random parent to the new offpring and the remainder from the second parent\n offspring = ga_.Outfit()\n if randomCategory == ga_.Category.TOP:\n offspring.top = randomParent1.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.BOTTOM:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent1.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.SHOES:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent1.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.NECK:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent1.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.HANDBAG:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent1.handbag\n\n return offspring", "def parents_loop(self):\r\n while len(self.parents) > 0:\r\n children = 0\r\n self.parent1 = random.choice(self.parents)\r\n index = self.parents.index(self.parent1)\r\n del self.parents[index]\r\n\r\n self.parent2 = random.choice(self.parents)\r\n index = self.parents.index(self.parent2)\r\n del self.parents[index]\r\n\r\n while children < 2:\r\n self.child = copy.deepcopy(self.parent1)\r\n \r\n self.battery_loop()\r\n\r\n childsolution = random_algo.Random(self.child, self.cable_cost, self.battery_cost)\r\n childsolution.change_battery_or_house('change_battery')\r\n childsolution.change_battery_or_house('change_house')\r\n\r\n if 
(self.child.valid_solution() and self.child not in self.district_population\r\n and self.child not in self.best_districts and self.child not in self.worst_districts):\r\n self.district_population.append(self.child)\r\n self.cost_populations.append(self.child.total_cost(self.battery_cost, self.cable_cost))\r\n children += 1" ]
[ "0.73990524", "0.7297003", "0.7292777", "0.7076102", "0.69150895", "0.6808625", "0.6713596", "0.6706768", "0.6703259", "0.65825945", "0.6577948", "0.6534811", "0.649272", "0.64872974", "0.63988644", "0.63337296", "0.6228257", "0.6193814", "0.6190003", "0.6182483", "0.6107116", "0.6097322", "0.6084564", "0.6007151", "0.600569", "0.5983194", "0.5978748", "0.592614", "0.5912181", "0.59018165" ]
0.8853995
0
An opengraph description meta should be present if the person bio placeholder is set.
def test_templates_person_detail_open_graph_description_bio(self): person = PersonFactory() page = person.extended_object # Add a bio to a person placeholder = person.extended_object.placeholders.get(slot="bio") add_plugin( language="en", placeholder=placeholder, plugin_type="PlainTextPlugin", body="A biographic description of the person", ) page.publish("en") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertContains( response, '<meta property="og:description" content="A biographic description of the person" />', )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_templates_person_detail_open_graph_description_bio_exceeds_max_length(\n self,\n ):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \" * 7\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:200]\n self.assertContains(\n response,\n f'<meta property=\"og:description\" content=\"{cut}\" />',\n )", "def test_templates_person_detail_open_graph_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n \"og:description\",\n )", "def test_templates_person_detail_meta_description_bio(self):\n person = PersonFactory()\n page = person.extended_object\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\"A biographic description of the person\",\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A biographic description of the person\" />',\n )", "def test_templates_person_detail_meta_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n '<meta name=\"description\"',\n )", "def test_templates_person_detail_meta_description_bio_exceeds_max_length(self):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:160]\n self.assertContains(\n response,\n f'<meta name=\"description\" content=\"{cut}\" />',\n )", "def _set_description(\n meta: Dict, description: Optional[Union[str, bool]] = None, **kwargs\n) -> Dict:\n if description is False or description is None:\n show_description_value = MetaWidget.DESCRIPTION_OPTION_NOTHING\n description = \"\"\n elif isinstance(description, str):\n show_description_value = MetaWidget.DESCRIPTION_OPTION_CUSTOM\n else:\n raise IllegalArgumentError(\n \"When using the add_card_widget or add_service_card_widget, 'description' must be \"\n \"'text_type' or None or False. 
Type is: {}\".format(type(description))\n )\n meta.update(\n {\n MetaWidget.SHOW_DESCRIPTION_VALUE: show_description_value,\n MetaWidget.CUSTOM_DESCRIPTION: description,\n }\n )\n return meta", "def test_templates_person_detail_cms_published_content_opengraph(self):\n person = PersonFactory(\n fill_portrait={\n \"original_filename\": \"portrait.jpg\",\n \"default_alt_text\": \"my portrait\",\n },\n should_publish=True,\n )\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, '<meta property=\"og:type\" content=\"profile\" />')\n self.assertContains(\n response, f'<meta property=\"og:url\" content=\"http://example.com{url:s}\" />'\n )\n pattern = (\n r'<meta property=\"og:image\" content=\"http://example.com'\n r\"/media/filer_public_thumbnails/filer_public/.*portrait\\.jpg__200x200\"\n )\n self.assertIsNotNone(re.search(pattern, str(response.content)))\n self.assertContains(\n response, '<meta property=\"og:image:width\" content=\"200\" />'\n )\n self.assertContains(\n response, '<meta property=\"og:image:height\" content=\"200\" />'\n )", "def test_templates_person_detail_meta_description(self):\n person = PersonFactory()\n page = person.extended_object\n\n title_obj = page.get_title_obj(language=\"en\")\n title_obj.meta_description = \"A custom description of the person\"\n title_obj.save()\n\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A custom description of the person\" />',\n )", "def _description(self):\n return None", "async def description(ctx, bot: typing.Union[discord.Member, discord.User]):\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n if not bot.bot:\n return await r(ctx, \"Not a bot.\")\n\n if len(data[\"Big_desc\"]) > 2000:\n desc = data[\"Big_desc\"][:2000] + \\\n f\"...\\n[View original page for full description](https://www.motiondevelopment.top/bots/{bot.id})\"\n else:\n desc = data[\"Big_desc\"]\n await em(ctx, embed=discord.Embed(color=0xfecdea, description=desc))", "async def _forcedescription(self, ctx, *args):\n if len(args) < 2:\n await ctx.send(\"Include both a name and a description!\")\n return\n\n god = database.getGodName(args[0], ctx.guild.id)\n\n if god:\n desc = \"\"\n i = 1\n for arg in args:\n if i > 1:\n desc = desc + \" \" + arg\n i += 1\n desc.strip()\n\n if len(desc) > 100:\n await ctx.send(\"Keep the description under 100 chars, please.\")\n return\n\n database.setDesc(god.ID, desc)\n await ctx.send(\"Description set successfully!\")\n else:\n await ctx.send(\"No God found by that name!\")", "def description():", "def description(self, newDescription=None):\n pass", "def get_description():\n desc = dict()\n desc[\"cache\"] = 3600\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"This plot is not meant for interactive use, but a backend for\n SPS plots.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"text\",\n name=\"pid\",\n default=\"202012300005-KDVN-WWUS83-SPSDVN\",\n label=\"IEM generated up to 35 char product identifier:\",\n ),\n dict(\n type=\"int\",\n default=0,\n name=\"segnum\",\n label=\"Product Segment Number (starts at 0):\",\n ),\n ]\n return desc", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == 
ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding empty descriptions to root')\n self.data['description'] = ''", "def get_description(self):", "def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc", "def get_description(self):\n pass", "def info(self, description: str, extra_description: Optional[str] = None):\n self.add(action=None,\n description=description,\n extra_description=extra_description,\n style=INFO_STYLE)", "def get_description():\n raise NotImplementedError", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")" ]
[ "0.68601227", "0.66034406", "0.65626204", "0.6385466", "0.61810136", "0.6127859", "0.6040194", "0.598596", "0.58483034", "0.5840213", "0.5766451", "0.5667915", "0.5608601", "0.55792457", "0.55779576", "0.55524266", "0.55337346", "0.55221784", "0.5492852", "0.54895395", "0.54735863", "0.54660904", "0.54660904", "0.54660904", "0.54660904", "0.54660904", "0.54660904", "0.54660904", "0.54660904", "0.54660904" ]
0.7378928
0
The "Organizations" section should not be displayed when empty.
def test_templates_person_detail_organizations_empty(self): person = PersonFactory(should_publish=True) # The "organizations" section should not be present on the public page url = person.public_extension.extended_object.get_absolute_url() response = self.client.get(url) self.assertContains(response, person.extended_object.get_title()) self.assertNotContains(response, "organization") # But it should be present on the draft page user = UserFactory(is_staff=True, is_superuser=True) self.client.login(username=user.username, password="password") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertContains(response, person.extended_object.get_title()) self.assertContains(response, "organization-glimpse-list")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def organizations(self):\n self.elements('organizations')", "def display_org_with_default(self):\r\n if self.display_organization:\r\n return self.display_organization\r\n\r\n return self.org", "def test_organizations_list(self):\n pass", "def clean_organization(self):\n return self.organization", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def is_organization(self):\n return self._is_name_type(self.ORGANIZATION)", "def is_organization(self):\n return self.user_id is None", "def test_retrieve_l_organizations(self):\n pass", "def test_organizations_read(self):\n pass", "def test_get_organization(self):\n pass", "def organization_arns(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"organization_arns\")", "def test_retrieve_l_organization(self):\n pass", "def test_getorganizations_item(self):\n pass", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def ListOrganizations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def org():\n\n settings = current.deployment_settings\n ADMIN = current.session.s3.system_roles.ADMIN\n SECTORS = \"Clusters\" if settings.get_ui_label_cluster() \\\n else \"Sectors\"\n stats = lambda i: settings.has_module(\"stats\")\n\n return M(c=\"org\")(\n M(\"Organizations MSW\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\"),\n M(\"TestSpiegel\", c=\"org\",f=\"spiegel\")\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\",\n check=stats)(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def atlas_organizations():\n pass", "def test_templates_person_detail_maincontent_empty(self):\n person = PersonFactory(should_publish=True)\n\n # The \"organizations\" section should not be present on the public page\n url = person.public_extension.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertContains(response, person.extended_object.get_title())\n self.assertNotContains(response, \"person-detail__maincontent\")\n\n # But it should be present on the draft page\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertContains(response, person.extended_object.get_title())\n self.assertContains(response, \"person-detail__maincontent\")", "def listOrganizations(self, name='', type=''):\n return 
self.get_json('/organization', {'name': name, 'type': type})", "def organization(request, id):\n org = get_object_or_404(Organization, pk=id)\n context = {\n 'org': org,\n 'cover': modify_image_url(str(org.cover), 'cover'),\n 'logo': modify_image_url(str(org.logo), 'logo'),\n 'mission': \"\",\n 'values': \"\",\n 'members': \"\",\n }\n\n context['mission'] = org.mission\n\n if org.value_set.exists():\n context['values'] = org.value_set.all()\n\n if org.membership_set.exists():\n membership = org.membership_set.all()\n context['members'] = []\n for member in membership:\n m = User.objects.get(pk=member.user_id)\n context['members'].append(m)\n\n return render(request, 'wantedly_app/org.html', context)", "def organization_name(self):\n if self.organization is not None:\n return self.organization.name\n\n return ''", "def test_add_organization(self):\n pass", "def filter_organisation(self, org_name):\n return self.form.set_value(\"organisation search\", org_name)", "def org():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n SECTORS = \"Clusters\" if current.deployment_settings.get_ui_label_cluster() \\\n else \"Sectors\"\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Service Types\", f=\"service\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def test_get_all_organization(self):\n self.client.force_authenticate(user=self.inventory_manager)\n response = self.client.get(\"/organization/\")\n self.assertEqual(response.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_getorgs(self):\n pass", "def get_organizations_to_delete():\n\n all_organizations = seed.models.Organization.objects.all()\n bad_organizations = [org for org in all_organizations if org.id not in get_core_organizations()]\n return bad_organizations", "def is_organizer(self):\n if \"isOrganizer\" in self._prop_dict:\n return self._prop_dict[\"isOrganizer\"]\n else:\n return None" ]
[ "0.6324429", "0.615348", "0.6080977", "0.59967095", "0.5937215", "0.5870967", "0.5870967", "0.5729163", "0.5728357", "0.5711528", "0.56988746", "0.56364673", "0.55923396", "0.55328053", "0.5492391", "0.5482238", "0.54775167", "0.5456552", "0.54380375", "0.5429035", "0.54202235", "0.536899", "0.5290806", "0.52854115", "0.5270431", "0.5263924", "0.5260686", "0.5257707", "0.52495486", "0.5245319" ]
0.71524966
0
The "maincontent" placeholder block should not be displayed on the public page when empty but only on the draft version for staff.
def test_templates_person_detail_maincontent_empty(self): person = PersonFactory(should_publish=True) # The "organizations" section should not be present on the public page url = person.public_extension.extended_object.get_absolute_url() response = self.client.get(url) self.assertContains(response, person.extended_object.get_title()) self.assertNotContains(response, "person-detail__maincontent") # But it should be present on the draft page user = UserFactory(is_staff=True, is_superuser=True) self.client.login(username=user.username, password="password") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertContains(response, person.extended_object.get_title()) self.assertContains(response, "person-detail__maincontent")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_posts(self, client, site, homepage):\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"snippets/carousel.html\")", "def show_homepage():\n\n return render_template(\"blank-slate.html\")", "def test_not_microsite_anonymous_homepage_content(self):\r\n\r\n resp = self.client.get('/')\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # assert various branding definitions on this Microsite ARE NOT VISIBLE\r\n\r\n self.assertNotContains(resp, 'This is a Test Microsite Overlay') # Overlay test message\r\n self.assertNotContains(resp, 'test_microsite/images/header-logo.png') # logo swap\r\n self.assertNotContains(resp, 'test_microsite/css/test_microsite') # css override\r\n self.assertNotContains(resp, '<title>Test Microsite</title>') # page title\r\n\r\n # assert that test course display name IS NOT VISIBLE, since that is a Microsite only course\r\n self.assertNotContains(resp, 'Robot_Super_Course')\r\n\r\n # assert that test course that is outside microsite IS VISIBLE\r\n self.assertContains(resp, 'Robot_Course_Outside_Microsite')\r\n\r\n # assert that footer template has been properly overriden on homepage\r\n self.assertNotContains(resp, 'This is a Test Microsite footer')", "def test_preview_no_asides(self):\n course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)\n html = ItemFactory.create(\n parent_location=course.location,\n category=\"html\",\n data={'data': \"<html>foobar</html>\"}\n )\n\n config = StudioConfig.current()\n config.enabled = False\n config.save()\n\n request = RequestFactory().get('/dummy-url')\n request.user = UserFactory()\n request.session = {}\n\n # Call get_preview_fragment directly.\n context = {\n 'reorderable_items': set(),\n 'read_only': True\n }\n html = get_preview_fragment(request, html, context).content\n\n self.assertNotRegex(html, r\"data-block-type=[\\\"\\']test_aside[\\\"\\']\")\n self.assertNotRegex(html, \"Aside rendered\")", "def test_empty_featured_pages(self, client, site, homepage):\n # no about or consult page: nothing rendered\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"cdhpages/snippets/featured_pages.html\")\n # only about page: nothing rendered\n about = ContentPage(title=\"about\", slug=\"about\", body=\"\")\n homepage.add_child(instance=about)\n homepage.save()\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"cdhpages/snippets/featured_pages.html\")\n # only consults page is live: nothing rendered\n about.unpublish()\n consult = ContentPage(title=\"consult\", slug=\"consult\")\n homepage.add_child(instance=consult)\n homepage.save()\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"cdhpages/snippets/featured_pages.html\")\n # both pages live; should render\n about.save_revision().publish()\n response = client.get(homepage.relative_url(site))\n assertTemplateUsed(response, \"cdhpages/snippets/featured_pages.html\")", "def clear_initial_content(self):\n self.login(self.manager)\n self.portal.manage_delObjects(['eingangskorb'])\n self.portal.manage_delObjects(['private'])\n self.portal.manage_delObjects(['vorlagen'])\n\n catalog = api.portal.get_tool('portal_catalog')\n initial_content = catalog(portal_type=[\n 'opengever.inbox.container',\n 'opengever.inbox.inbox',\n 'opengever.private.root',\n 'opengever.dossier.templatefolder',\n ])\n assert len(initial_content) == 0", "def test_create_page_with_main_box(self):\n\n main_block = 
PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('text', response.context)\n self.assertIn('title', response.context)\n self.assertIn('image', response.context)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def fix_default_content(portal):\n logger = logging.getLogger(PROJECTNAME)\n content_ids = ['front-page', 'events', ]\n portal_ids = portal.objectIds()\n for cId in content_ids:\n if cId in portal_ids:\n portal.manage_delObjects([cId])\n logger.info('Deleted object with id %s' % cId)\n if 'news' in portal_ids:\n news = portal['news']\n news.setTitle(u'Notícias')\n news.setDescription(u'Notícias do Plone Symposium')\n news.reindexObject()\n if 'Members' in portal_ids:\n # Hide user's tab\n members = portal['Members']\n members.setTitle(u'Participantes')\n members.setExcludeFromNav(True)\n members.reindexObject()\n\n logger.info('Cleaned up portal contents')", "def test_templates_person_detail_cms_draft_content(self):\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n published_category = CategoryFactory(should_publish=True)\n not_published_category = CategoryFactory()\n\n published_organization = OrganizationFactory(should_publish=True)\n not_published_organization = OrganizationFactory()\n\n person = PersonFactory(\n page_title=\"My page title\",\n fill_portrait=True,\n fill_bio=True,\n fill_maincontent=True,\n fill_categories=[published_category, not_published_category],\n fill_organizations=[published_organization, not_published_organization],\n )\n\n # Modify the draft version of the published category\n title_obj = published_category.extended_object.title_set.get(language=\"en\")\n title_obj.title = \"modified category\"\n title_obj.save()\n\n # Modify the draft version of the published organization\n title_obj = published_category.extended_object.title_set.get(language=\"en\")\n title_obj.title = \"modified organization\"\n title_obj.save()\n page = person.extended_object\n\n # The page should be visible as draft to the superuser\n url = page.get_absolute_url()\n response = self.client.get(url)\n content = htmlmin.minify(\n response.content.decode(\"UTF-8\"),\n reduce_empty_attributes=False,\n remove_optional_attribute_quotes=False,\n )\n\n self.assertContains(\n response,\n \"<title>My page title - example.com</title>\",\n html=True,\n status_code=200,\n )\n title = person.extended_object.get_title()\n self.assertContains(\n response,\n f'<h1 class=\"subheader__title\">{title:s}</h1>',\n html=True,\n )\n\n # Main content should be present when not empty\n self.assertContains(response, \"person-detail__maincontent\")\n\n # The published category should be on the page in its published version\n self.assertContains(\n response,\n (\n # pylint: disable=consider-using-f-string\n '<a class=\"category-badge\" href=\"{:s}\">'\n '<span class=\"offscreen\">Category</span>'\n '<span class=\"category-badge__title\">{:s}</span></a>'\n ).format(\n published_category.public_extension.extended_object.get_absolute_url(),\n published_category.public_extension.extended_object.get_title(),\n ),\n html=True,\n )\n # The not published category should not be on the page\n self.assertContains(\n response,\n (\n # pylint: disable=consider-using-f-string\n '<a class=\"category-badge category-badge--draft\" href=\"{:s}\">'\n 
'<span class=\"offscreen\">Category</span>'\n '<span class=\"category-badge__title\">{:s}</span></a>'\n ).format(\n not_published_category.extended_object.get_absolute_url(),\n not_published_category.extended_object.get_title(),\n ),\n html=True,\n )\n\n # The published organization should be on the page in its published version\n self.assertIn(\n # pylint: disable=consider-using-f-string\n '<div class=\"organization-glimpse\" property=\"contributor\" '\n 'typeof=\"CollegeOrUniversity\"><a href=\"{:s}\" title=\"{:s}\">'.format(\n published_organization.extended_object.get_absolute_url(),\n published_organization.extended_object.get_title(),\n ),\n content,\n )\n self.assertContains(\n response,\n # pylint: disable=consider-using-f-string\n '<h2 class=\"organization-glimpse__title\" property=\"name\">{:s}</h2>'.format(\n published_organization.public_extension.extended_object.get_title()\n ),\n html=True,\n )\n # The not published organization should not be on the page\n self.assertIn(\n # pylint: disable=consider-using-f-string\n '<a href=\"{:s}\" title=\"{:s}\">'.format(\n not_published_organization.extended_object.get_absolute_url(),\n not_published_organization.extended_object.get_title(),\n ),\n content,\n )\n\n self.assertContains(\n response,\n # pylint: disable=consider-using-f-string\n '<h2 class=\"organization-glimpse__title\" property=\"name\">{:s}</h2>'.format(\n not_published_organization.extended_object.get_title()\n ),\n html=True,\n )\n\n self.assertNotContains(response, \"modified\")", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_none_content_object_production(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('output:', tmpl.render(Context()).strip())", "def test_placeholder_untranslated_content(self):\n from pages import settings as pages_settings\n setattr(pages_settings, \"PAGE_USE_SITE_ID\", False)\n page = self.new_page(content={})\n placeholder = PlaceholderNode('untrans', page='p', untranslated=True)\n placeholder.save(page, 'fr-ch', 'test-content', True)\n placeholder.save(page, 'en-us', 'test-content', True)\n self.assertEqual(len(Content.objects.all()), 1)\n self.assertEqual(Content.objects.all()[0].language, 'en-us')\n\n placeholder = PlaceholderNode('untrans', page='p', untranslated=False)\n placeholder.save(page, 'fr-ch', 'test-content', True)\n self.assertEqual(len(Content.objects.all()), 2)\n\n # test the syntax\n page = self.new_page()\n template = django.template.loader.get_template(\n 'pages/tests/untranslated.html')\n context = Context({'current_page': page, 'lang':'en-us'})\n self.assertEqual(template.render(context), '')", "def add_default_content(self):\n data = get_default_eventpage_data()\n\n for i, section in enumerate(data):\n section[\"position\"] = i\n section[\"content\"] = render_to_string(section[\"template\"])\n del section[\"template\"]\n self.content.create(**section)", "def test_microsite_anonymous_homepage_content(self):\r\n\r\n resp = self.client.get('/', HTTP_HOST=MICROSITE_TEST_HOSTNAME)\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # assert various branding definitions on this Microsite\r\n # as per the configuration and Microsite overrides\r\n\r\n self.assertContains(resp, 'This is a Test Microsite Overlay') # Overlay test message\r\n self.assertContains(resp, 'test_microsite/images/header-logo.png') # logo swap\r\n self.assertContains(resp, 
'test_microsite/css/test_microsite') # css override\r\n self.assertContains(resp, 'Test Microsite') # page title\r\n\r\n # assert that test course display name is visible\r\n self.assertContains(resp, 'Robot_Super_Course')\r\n\r\n # assert that test course that is outside microsite is not visible\r\n self.assertNotContains(resp, 'Robot_Course_Outside_Microsite')\r\n\r\n # assert that footer template has been properly overriden on homepage\r\n self.assertContains(resp, 'This is a Test Microsite footer')\r\n\r\n # assert that the edX partners section is not in the HTML\r\n self.assertNotContains(resp, '<section class=\"university-partners university-partners2x6\">')\r\n\r\n # assert that the edX partners tag line is not in the HTML\r\n self.assertNotContains(resp, 'Explore free courses from')", "def test_blank_content_object_production(self):\n tmpl = Template(\"\"\"\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('fallback', tmpl.render(Context()).strip())", "def test_placeholder_inherit_content(self):\n from pages import settings as pages_settings\n setattr(pages_settings, \"PAGE_USE_SITE_ID\", False)\n author = User.objects.all()[0]\n p1 = self.new_page(content={'inher':'parent-content'})\n p2 = self.new_page()\n template = django.template.loader.get_template('pages/tests/test7.html')\n context = Context({'current_page': p2, 'lang':'en-us'})\n self.assertEqual(template.render(context), '')\n \n p2.move_to(p1, position='first-child')\n self.assertEqual(template.render(context), 'parent-content')", "def test_emptySectionsOmitted(self):\n for ticket in self.project.children():\n if ticket.splitext()[1] in ('.feature', '.misc', '.doc'):\n ticket.remove()\n\n self.builder.build(\n self.project, self.project.child('NEWS'),\n 'Some Thing 1.2')\n\n self.assertEquals(\n self.project.child('NEWS').getContent(),\n 'Some Thing 1.2\\n'\n '==============\\n'\n '\\n'\n 'Bugfixes\\n'\n '--------\\n'\n ' - Broken stuff was fixed. (#23)\\n'\n '\\n'\n 'Deprecations and Removals\\n'\n '-------------------------\\n'\n ' - Stupid stuff was deprecated. 
(#25)\\n'\n '\\n\\n'\n 'Here is stuff which was present previously.\\n')", "def test_empty_projects(self, client, site, homepage):\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"projects/snippets/project_card.html\")", "def test_public_child_container_preview_html(self):\n empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')\n published_empty_child_container = self.store.publish(empty_child_container.location, self.user.id)\n self.validate_preview_html(published_empty_child_container, self.reorderable_child_view, can_add=False)", "def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()", "def test_none_content_object_debug(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n if is_django_15plus():\n with self.assertRaisesRegexp(ImproperlyConfigured,\n 'no object provided to the \"editregion\" '\n 'template tag forregion \"test\"'):\n tmpl.render(Context()).strip()\n else:\n with self.assertRaisesRegexp(ValueError,\n \"content_object was probably '', \"\n \"check the context provided\"):\n tmpl.render(Context()).strip()", "def test_public_container_preview_html(self):\n published_unit = self.store.publish(self.vertical.location, self.user.id)\n published_child_container = self.store.get_item(self.child_container.location)\n published_child_vertical = self.store.get_item(self.child_vertical.location)\n self.validate_preview_html(published_unit, self.container_view)\n self.validate_preview_html(published_child_container, self.container_view)\n self.validate_preview_html(published_child_vertical, self.reorderable_child_view)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_blank_content_object_debug(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n with self.assertRaisesRegexp(ValueError, \"content_object was probably \"\n \"'', check the context \"\n \"provided\"):\n tmpl.render(Context()).strip()", "def content(request):\n if not request.method in ('GET', 'POST'):\n raise Http404(\"Invalid method: {}\".format(request.method))\n\n if not authuser_is_user(request.user):\n raise Http404(\"User does not have a profile, contact admin\")\n\n section = getattr(request, request.method).get('section', None)\n if section not in SECTION_CHOICES:\n raise Http404(\"valid section required\")\n content = latest_content(request.user, section)\n\n if request.method == 'POST':\n form = PartialContentForm(request.POST, instance=content)\n if form.is_valid():\n logging.info(\"creating record with data: {}\".format(form.save(commit=False)))\n form.save()\n\n form = PartialContentForm(instance=content)\n return render(request, 'editor/content-form.html', {'form': form})", "def test_that_not_logged_in_does_not_shows_edit_button(testapp):\n html = testapp.get('/journal/1').html\n assert not html.find('main').a", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def under_construction(request):\n return render(request, 'under_construction.html', {})", 
"def main_layout(app: dash.Dash, data: GameData, content: html) -> html:\n layout = html.Div([\n html.Header(get_header(app, data)),\n html.Main(id='page-content', children=[content]),\n html.Footer(get_footer())\n ])\n\n return layout" ]
[ "0.58954334", "0.5879357", "0.58698666", "0.58275884", "0.5777995", "0.57750595", "0.5751515", "0.5750153", "0.5733736", "0.5710872", "0.55958956", "0.558564", "0.55835044", "0.55611306", "0.5554163", "0.5551871", "0.5529988", "0.5525225", "0.54813397", "0.54223466", "0.541176", "0.5381566", "0.5307258", "0.5198383", "0.5188607", "0.518557", "0.5179037", "0.5149003", "0.5127137", "0.5125595" ]
0.6660572
0
The courses to which a person has participated should appear on this person's detail page.
def test_templates_person_detail_related_courses(self): user = UserFactory(is_staff=True, is_superuser=True) self.client.login(username=user.username, password="password") person = PersonFactory() course = CourseFactory(fill_team=[person]) url = person.extended_object.get_absolute_url() response = self.client.get(url) # The course should be present on the page self.assertContains( response, '<span class="course-glimpse__title-text">{0:s}</span>'.format( # noqa pylint: disable=consider-using-f-string,line-too-long course.extended_object.get_title() ), html=True, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_courses(self):\r\n\r\n return self.def_ms.get_courses()", "def get_courses(self):\n if not self.is_course_based_activity():\n raise IllegalState()\n else:\n raise Unimplemented()", "def see_teaching_courses(self, username: str, token: str) -> List[Dict[str, object]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get UID from user's username\n uid = self.get_uid(username=username)\n\n # Query database for courses instructed by a user with this UID\n cursor = self._db_connection.cursor()\n cursor.execute(\n '''\n SELECT \n course_id,\n course_abbreviation,\n course_name, \n time,\n seats \n FROM \n courses\n WHERE \n instructor_id = ?\n ;\n ''', (uid,))\n\n db_results = cursor.fetchall()\n\n if db_results is None:\n print(\"No associated courses found!\")\n return []\n\n # Build information dicts for every course this user is instructing\n courses = []\n for result in db_results:\n # Get the number of students enrolled in this course already\n cursor.execute('''SELECT COUNT(*) FROM enrollment_records WHERE course_id = ?;''', (result[0],))\n students_enrolled = cursor.fetchone()[0]\n if students_enrolled is None:\n students_enrolled = 0\n\n # Build a course dict from the data\n courses.append({\n \"course_abbreviation\": result[1],\n \"course_name\": result[2],\n \"time\": result[3],\n \"students_enrolled\": students_enrolled,\n \"capacity\": result[4],\n })\n\n return courses", "def available_courses(self):\r\n def _get_course_name(el):\r\n # The first component in the link text is the course number\r\n _, course_name = el.text.split(' ', 1)\r\n return course_name\r\n\r\n return self.q(css='section.info > hgroup > h3 > a').map(_get_course_name).results", "def course_info(self):\n print(\"Course name: {}\".format(self._course_name))\n print(\"Lead teacher: {}\".format(self._teacher))\n\n if len(self._students) == 0:\n print(\"Course does not enrolled by any student\")\n else:\n print(\"Enrolled: {}/{}\".format(len(self._students), self._total_place))", "def show_courses_and_profs(request):\r\n\tcourses = course_views.all_courses(request)\r\n\treturn render(request, 'templates/browse.html', {'courses': courses})", "def my_courses(self, signer):\n return list(chain(*[p.user_courses(signer=signer) for p in self.providers]))", "def getUserCoursesList(self, chat_id):\n\t\tcommand = \"SELECT ID, name, description FROM courses WHERE author_id=?;\"\n\t\tparams = (chat_id,)\n\n\t\tdata = self._run_command(command, params)\n\n\t\tif not data:\n\t\t\treturn None\n\n\t\tresult = []\n\t\tfor i in data:\n\t\t\tresult.append({\"ID\": i[0], \n\t\t\t\t\"name\": i[1],\n\t\t\t\t\"description\": i[2] if i[2] else \"\",\n\t\t\t\t})\n\n\t\treturn result", "def get_courses(self, depth=0):\r\n return self.courses.values()", "def _accessible_courses_list(request):\r\n courses = modulestore('direct').get_courses()\r\n\r\n # filter out courses that we don't have access to\r\n def course_filter(course):\r\n \"\"\"\r\n Get courses to which this user has access\r\n \"\"\"\r\n if GlobalStaff().has_user(request.user):\r\n return course.location.course != 'templates'\r\n\r\n return (has_course_access(request.user, course.id)\r\n # pylint: disable=fixme\r\n # TODO remove this condition when templates purged from db\r\n and course.location.course != 'templates'\r\n )\r\n courses = filter(course_filter, courses)\r\n return courses", "def courses_with_role(self):\r\n return 
CourseAccessRole.objects.filter(role=self.role, user=self.user)", "def course_pages(self):\n return None", "def course_pages(self):\n return None", "def test_templates_person_detail_related_max_courses(self, _mock_page_url):\n # Create our dummy person and the 3 courses we'll attach to it\n person = PersonFactory(should_publish=True)\n courses = CourseFactory.create_batch(3, fill_team=[person], should_publish=True)\n # Link the 3 courses with our person through the relevant placeholder\n for course in courses:\n add_plugin(\n course.extended_object.placeholders.get(slot=\"course_team\"),\n PersonPlugin,\n \"en\",\n page=person.extended_object,\n )\n # Make sure we do have 3 courses on the person\n self.assertEqual(person.get_courses().count(), 3)\n\n # Only the first two are rendered in the template\n response = self.client.get(person.extended_object.get_absolute_url())\n self.assertContains(response, courses[0].extended_object.get_title())\n self.assertContains(response, courses[1].extended_object.get_title())\n self.assertNotContains(response, courses[2].extended_object.get_title())\n\n # There is a link to view more related courses directly in the Search view\n self.assertContains(\n response,\n f'href=\"/the/courses/?persons={person.get_es_id()}\"',\n )\n self.assertContains(\n response,\n f\"See all courses related to {person.extended_object.get_title():s}\",\n )", "def course(self):\n return self.section.course", "def satisfying_courses(self):\n return (\n Course.objects.all()\n .exclude(id__in=self.overrides.all())\n .filter(\n Q(department__in=self.departments.all(), semester=self.semester)\n | Q(id__in=self.courses.all())\n )\n )", "def courses(request):\r\n courses = get_courses(request.user, request.META.get('HTTP_HOST'))\r\n courses = sort_by_announcement(courses)\r\n\r\n return render_to_response(\"courseware/courses.html\", {'courses': courses})", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def faculty_courses_for_user(user):\n return Course.objects.filter(faculty_group__in=user.groups.all())", "def get_courses(self, *args):\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({\n 'mnemo': module.course.mnemo,\n 'modules': [{'mnemo': module.mnemo}]\n })\n return courses", "def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []", "def get_product_courses(product):\n if product.content_type.model == CONTENT_TYPE_MODEL_COURSERUN:\n return [product.content_object.course]\n elif product.content_type.model == CONTENT_TYPE_MODEL_COURSE:\n return [product.content_object]\n elif product.content_type.model == CONTENT_TYPE_MODEL_PROGRAM:\n return list(\n product.content_object.courses.all().order_by(\"position_in_program\")\n )", "def getCoursesList(self, pageSize=100):\n results = self.service.courses().list(pageSize=pageSize).execute()\n self.courses = results.get('courses', [])\n if not self.courses:\n 
return []\n return self.courses # Might not have to return self.courses, but it's useful for now", "def get_courses(std):\n return std[2]", "def get_courses(db: Session = Depends(get_db)): # , _: models.User = Depends(get_current_user))\n return crud.course.get_multi(db, skip=0, limit=100)", "def to_string(self):\n return \"{base_msg} Courses: {courses}\".format(\n base_msg=super().to_string(),\n courses=self.courses_string()\n )", "def search_courses():\n current_user = view_helpers.get_current_user()\n courses, has_more = m.Course.search(flask.request.values, current_user)\n\n course_dicts, user_course_dicts, _ = (\n m.Course.get_course_and_user_course_dicts(courses, current_user))\n\n return api_util.jsonify({\n 'courses': course_dicts,\n 'user_courses': user_course_dicts,\n 'has_more': has_more,\n })", "def display_courses(courses):\n\n _print('You can access %d courses' % len(courses))\n for i, course in enumerate(courses, 1):\n _print('%d - [%s] - %s' % (i, course.state, course.name))", "def _accessible_courses_list_from_groups(request):\r\n courses_list = {}\r\n\r\n instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()\r\n staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()\r\n all_courses = instructor_courses | staff_courses\r\n\r\n for course_access in all_courses:\r\n course_key = course_access.course_id\r\n if course_key not in courses_list:\r\n course = modulestore('direct').get_course(course_key)\r\n if course is None:\r\n raise ItemNotFoundError(course_key)\r\n courses_list[course_key] = course\r\n\r\n return courses_list.values()", "def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses" ]
[ "0.70524913", "0.68237513", "0.67915165", "0.6646394", "0.65155363", "0.64971614", "0.6459945", "0.64266986", "0.6408845", "0.6397062", "0.6390385", "0.634456", "0.634456", "0.62637556", "0.625636", "0.62397075", "0.62369204", "0.619848", "0.61810017", "0.6175437", "0.6159269", "0.6137949", "0.6128802", "0.60955405", "0.5993394", "0.59243166", "0.5900433", "0.58864117", "0.5870485", "0.58628875" ]
0.68516326
1
Make sure the person detail page does not display too many courses, even when a large number are related to the current person, as this can cause the page to load very slowly and is not a great experience for the user anyway.
def test_templates_person_detail_related_max_courses(self, _mock_page_url): # Create our dummy person and the 3 courses we'll attach to it person = PersonFactory(should_publish=True) courses = CourseFactory.create_batch(3, fill_team=[person], should_publish=True) # Link the 3 courses with our person through the relevant placeholder for course in courses: add_plugin( course.extended_object.placeholders.get(slot="course_team"), PersonPlugin, "en", page=person.extended_object, ) # Make sure we do have 3 courses on the person self.assertEqual(person.get_courses().count(), 3) # Only the first two are rendered in the template response = self.client.get(person.extended_object.get_absolute_url()) self.assertContains(response, courses[0].extended_object.get_title()) self.assertContains(response, courses[1].extended_object.get_title()) self.assertNotContains(response, courses[2].extended_object.get_title()) # There is a link to view more related courses directly in the Search view self.assertContains( response, f'href="/the/courses/?persons={person.get_es_id()}"', ) self.assertContains( response, f"See all courses related to {person.extended_object.get_title():s}", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_templates_person_detail_related_courses(self):\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n person = PersonFactory()\n course = CourseFactory(fill_team=[person])\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n\n # The course should be present on the page\n self.assertContains(\n response,\n '<span class=\"course-glimpse__title-text\">{0:s}</span>'.format( # noqa pylint: disable=consider-using-f-string,line-too-long\n course.extended_object.get_title()\n ),\n html=True,\n )", "def test_render_page_big_course(self):\n url = reverse('completion_view', kwargs={'course_id': self.course.id})\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 200)", "def test_render_data_is_bigcourse_wrong_params(self):\n url = '{}'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.client.get(url)\n self.assertEqual(self.response.status_code, 404)\n\n url = '{}?is_bigcourse=asd'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.client.get(url)\n self.assertEqual(self.response.status_code, 404)", "def get(self, request):\n about_persons = AboutPerson.objects.all()\n funders = about_persons.filter(funder_or_adviser='funder').order_by('position')\n advisers = about_persons.filter(funder_or_adviser='adviser').order_by('position')\n architects = about_persons.filter(funder_or_adviser='architect').order_by('position')\n engineers = about_persons.filter(funder_or_adviser='engineer').order_by('position')\n previous_architects = about_persons.filter(funder_or_adviser='previous architect').order_by('position')\n previous_engineers = about_persons.filter(funder_or_adviser='previous engineer').order_by('position')\n can_edit = False\n if request.user.is_authenticated:\n user_prof = UserProfile.objects.get(user=request.user)\n if user_prof.user.email == \"[email protected]\":\n can_edit = True\n if (user_prof.is_contributor):\n contrib_prof = ContributorProfile.objects.get(user_profile=user_prof)\n return render(request, \"about.html\", {'user_prof': user_prof, 'contrib_prof': contrib_prof, 'funders': funders,\n 'advisers': advisers, 'architects': architects, 'engineers': engineers,\n 'previous_architects': previous_architects, 'previous_engineers': previous_engineers,'can_edit': can_edit})\n else:\n return render(request, \"about.html\", {'user_prof': user_prof, 'funders': funders, 'advisers': advisers,\n 'architects': architects, 'engineers': engineers, 'previous_architects': previous_architects,\n 'previous_engineers': previous_engineers,'can_edit': can_edit})\n else:\n return render(request, \"about.html\", {'funders': funders, 'advisers': advisers, 'architects': architects,\n 'engineers': engineers, 'previous_architects': previous_architects, 'previous_engineers': previous_engineers, 'can_edit': can_edit})", "def test_render_data_wrong_course(self):\n url = '{}?is_bigcourse=0'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': 'course-v1:mss+MSS001+2019_2'}))\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 404)", "def course_pages(self):\n return None", "def course_pages(self):\n return None", "def show_courses_and_profs(request):\r\n\tcourses = course_views.all_courses(request)\r\n\treturn render(request, 'templates/browse.html', {'courses': courses})", "def 
test_render_data_no_content_bigcourse(self):\n url = '{}?is_bigcourse=1'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course_no_content.id}))\n self.response = self.super_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.super_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(data['data'][-1][0], self.student.username)\n self.assertEqual(data['data'][-1][1], '')\n self.assertEqual(data['data'][-1][2], self.student.email)\n self.assertEqual(data['data'][-1][3], '')", "def print_all_free_courses():\n all = Course.get_courses()\n print(\"All free courses\")\n for c in all:\n if len(c._students) >= c._total_place:\n continue\n c.course_info()\n print(\"----------\")", "def test_render_data_big_course(self):\n context_key = LearningContextKey.from_string(str(self.course.id))\n for item in self.items:\n usage_key = item.scope_ids.usage_id\n completion = models.BlockCompletion.objects.create(\n user=self.student,\n context_key=context_key,\n block_key=usage_key,\n completion=1.0,\n )\n url = '{}?is_bigcourse=1'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.staff_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(data['data'][-1][0], self.student.username)\n self.assertEqual(data['data'][-1][1], '')\n self.assertEqual(data['data'][-1][2], self.student.email)\n self.assertEqual(data['data'][-1][3], completion.modified.strftime(\"%d/%m/%Y, %H:%M:%S\"))", "def test_render_data_no_users_bigcourse(self):\n url = '{}?is_bigcourse=1'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course_no_user.id}))\n self.response = self.super_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.super_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'], [[True]])", "def has_info_no_popular_courses(self):\n\n return self.is_displayed(locator=self.locators['popularinfo'])", "def achieve_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contrib_list = []\n article_list = []\n gsoc_list = []\n speaker_list = []\n intern_list = []\n contest_participant_list = []\n icpc_participants_regional_list = []\n icpc_participants_final_list = []\n\n contrib_list_all = Contribution.objects.all()\n contrib_list = Contribution.objects.all()[:5]\n article_list = Article.objects.all()[:5]\n gsoc_list = Gsoc.objects.all()[:5]\n speaker_list = Speaker.objects.all()[:5]\n intern_list = Intern.objects.all()[:5]\n contest_list = Contest_won.objects.all()[:5]\n\n \n contrib_org = {}\n if contrib_list_all:\n for contrib in contrib_list_all:\n if contrib.org_name not in contrib_org.keys():\n contrib_org[contrib.org_name] = 0\n\n for contrib in contrib_list:\n contrib_org[contrib.org_name] += 1\n\n if contest_list:\t\n contest_participant_list = []\n\tfor contest_won_obj in contest_list:\t\n\t c_id = contest_won_obj.contest_id\n\t c_p_objs = 
Contest_won_participant.objects.filter(contest_id = c_id)\n\t contest_participant_list.extend(c_p_objs)\n \n icpc_list_regionals = ACM_ICPC_detail.objects.filter(level='regional').order_by('ranking')[:2]\n if icpc_list_regionals:\n for icpc_obj in icpc_list_regionals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_regional_list.append(icpc_participant_list)\n\n icpc_list_finals = ACM_ICPC_detail.objects.filter(level='finals').order_by('ranking')[:2]\n if icpc_list_finals:\n for icpc_obj in icpc_list_finals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_final_list.append(icpc_participant_list)\n\n return render_to_response('achievement/achievement_viewall.html',\\\n\t\t{'username':username, \\\n 'is_loggedin':is_loggedin, \\\n 'contrib_list':contrib_list, \\\n 'contrib_org':contrib_org,\\\n 'article_list':article_list, \\\n 'gsoc_list':gsoc_list, \\\n 'speaker_list':speaker_list, \\\n 'intern_list':intern_list, \\\n 'contest_list':contest_list, \\\n 'contest_participant_list':contest_participant_list, \\\n 'icpc_participants_final_list':icpc_participants_final_list, \\\n 'icpc_participants_regional_list':icpc_participants_regional_list}, \\\n RequestContext(request))", "def detail_course(request, pk, template=\"core/detail_course.html\"):\n response = {\n 'course': get_object_or_404(Course, pk=pk)\n }\n return direct_to_template(request, template, response)", "def courses(request):\r\n courses = get_courses(request.user, request.META.get('HTTP_HOST'))\r\n courses = sort_by_announcement(courses)\r\n\r\n return render_to_response(\"courseware/courses.html\", {'courses': courses})", "def validate_student_limit(course_id):\n course = get_course_by_id(course_id)\n student_count = len(get_students_by_course(course_id))\n\n if course.studentLimit != 0 and student_count >= course.studentLimit:\n return False\n else:\n return True", "async def displayCourseOfTheGame(self):\n print(\"display course of the game...\")\n course = \"\"\n for message in self.courseOfTheGame:\n if len(course + message) >= 2000: # Limit by discord\n await self.textChannel.send(course)\n course = \"\"\n course += message\n if course != \"\":\n await self.textChannel.send(course)\n print(\"displayed.\")", "async def _check_n_entries(self, ctx: commands.Context, number_of_people_to_display: int) -> int:\n max_entries = AocConfig.leaderboard_max_displayed_members\n author = ctx.message.author\n if not 0 <= number_of_people_to_display <= max_entries:\n log.debug(\n f\"{author.name} ({author.id}) attempted to fetch an invalid number \"\n f\" of entries from the AoC leaderboard ({number_of_people_to_display})\"\n )\n await ctx.send(\n f\":x: {author.mention}, number of entries to display must be a positive \"\n f\"integer less than or equal to {max_entries}\\n\\n\"\n f\"Head to 
{self.private_leaderboard_url} to view the entire leaderboard\"\n )\n number_of_people_to_display = max_entries\n\n return number_of_people_to_display", "def test_render_data_with_rut_big_course(self):\n try:\n from unittest.case import SkipTest\n from uchileedxlogin.models import EdxLoginUser\n except ImportError:\n self.skipTest(\"import error uchileedxlogin\")\n edxlogin = EdxLoginUser.objects.create(user=self.student, run='000000001K')\n context_key = LearningContextKey.from_string(str(self.course.id))\n for item in self.items:\n usage_key = item.scope_ids.usage_id\n completion = models.BlockCompletion.objects.create(\n user=self.student,\n context_key=context_key,\n block_key=usage_key,\n completion=1.0,\n )\n url = '{}?is_bigcourse=1'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.staff_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(data['data'][-1][0], self.student.username)\n self.assertEqual(data['data'][-1][1], edxlogin.run)\n self.assertEqual(data['data'][-1][2], self.student.email)\n self.assertEqual(data['data'][-1][3], completion.modified.strftime(\"%d/%m/%Y, %H:%M:%S\"))", "def display_courses(courses):\n\n _print('You can access %d courses' % len(courses))\n for i, course in enumerate(courses, 1):\n _print('%d - [%s] - %s' % (i, course.state, course.name))", "def get_courses(db: Session = Depends(get_db)): # , _: models.User = Depends(get_current_user))\n return crud.course.get_multi(db, skip=0, limit=100)", "def test_course_members_fails_overlimit(self):\n with self.assertRaises(OverEnrollmentLimitException):\n get_course_members(self.course.id)", "def test_course_filter(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n student = enrollment.student\n course_1 = CourseFactory(grade_levels=[enrollment.grade_level])\n course_2 = CourseFactory(grade_levels=[enrollment.grade_level])\n GradeFactory(student=student, graded_work__course_task__course=course_1)\n GradeFactory(student=student, graded_work__course_task__course=course_2)\n url = self.reverse(\"reports:progress\", pk=enrollment.id)\n url += f\"?course={course_1.id}\"\n\n with self.login(user):\n self.get_check_200(url)\n\n assert len(self.get_context(\"courses\")) == 1", "def check_how_much_to_paid(self, cells_number):\n if len(self.owner.countries[self.section]) == 2:\n return 10 * cells_number * 10000\n return 4 * cells_number * 10000", "def validate_new_curriculum_courses(self, curriculum_courses):\n\n for cur in curriculum_courses:\n # check to make sure its in the general courses table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Course WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"course does not exist, we must create new one or cancel\") # todo\n\n return True", "def _accessible_courses_list(request):\r\n courses = modulestore('direct').get_courses()\r\n\r\n # filter out courses that we don't have access to\r\n def course_filter(course):\r\n \"\"\"\r\n Get courses to which this user has access\r\n \"\"\"\r\n if GlobalStaff().has_user(request.user):\r\n return course.location.course != 'templates'\r\n\r\n return (has_course_access(request.user, course.id)\r\n # 
pylint: disable=fixme\r\n # TODO remove this condition when templates purged from db\r\n and course.location.course != 'templates'\r\n )\r\n courses = filter(course_filter, courses)\r\n return courses", "def is_long_book(book):\n return book.number_of_pages >= 600", "def test_render_data_course_empty_bigcourse(self):\n url = '{}?is_bigcourse=1'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course_empty.id}))\n self.response = self.super_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.super_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'], [[True]])", "def more(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'more.html',\n { 'is_longdale_user': user_is_ingroup(request, 'longdale_user'),\n 'title':'More',\n 'year':datetime.now().year,\n }\n )" ]
[ "0.6131107", "0.5759475", "0.555766", "0.5475429", "0.5444028", "0.5432407", "0.5432407", "0.5407837", "0.53872633", "0.534116", "0.5268757", "0.5227538", "0.5218553", "0.5216301", "0.5184439", "0.5172156", "0.5165793", "0.5139287", "0.5070918", "0.5049757", "0.5048851", "0.50236017", "0.50172776", "0.50092536", "0.49827272", "0.4972862", "0.4970433", "0.49692407", "0.49645692", "0.49631667" ]
0.6978592
0
The blog posts written by a person should appear on this person's detail page.
def test_templates_person_detail_related_blog_posts(self): user = UserFactory(is_staff=True, is_superuser=True) self.client.login(username=user.username, password="password") person = PersonFactory() blog_post = BlogPostFactory(fill_author=[person]) url = person.extended_object.get_absolute_url() response = self.client.get(url) html = lxml.html.fromstring(response.content) # The blog post should be present on the page title = html.cssselect("h3.blogpost-glimpse__title")[0] self.assertEqual( title.text_content().strip(), blog_post.extended_object.get_title() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_blog(self):", "def post_detail_blog(request, blog_pk):\n # recuperar el post\n # recupera posts\n posts = Post.objects.order_by('-created_at').filter(owner=blog_pk)\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)", "def community_post_detail_view(request, slug):\n post = CommunityPostModel.objects.get(slug=slug) # Get the post\n posts = CommunityPostModel.objects.all()[:3] # Get the latest 3 posts\n author = UserModel.objects.get(id=post.author.id) # Get the author\n\n my_article = False # Set the flag to false\n if request.user == post.author: # If the user is the author, set the flag to true\n my_article = True # Set the flag to true\n\n context = { # Pass the variables to the template\n 'post': post,\n 'latest_posts': posts,\n 'author': author,\n 'my_article': my_article,\n }\n return render(request,\n 'pages/patient-community/community-post-details.html',\n context) # render the patient community detail page", "def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects", "def post(request, blog, urlname):\n\tif request.user.is_authenticated():\n\t\tblog = Blog.qa_objects.get(urlname=blog)\n\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=urlname)\n\t\tposts = BlogEntry.qa_objects.filter(blog=blog).order_by('-posting_time')[:5]\n\t\tblogs = Blog.qa_objects.order_by('name')\n\telse:\n\t\tblog = Blog.objects.get(urlname=blog)\n\t\tpost = BlogEntry.objects.get(blog=blog, urlname=urlname)\n\t\tposts = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:5]\n\t\tblogs = Blog.objects.order_by('name')\n\tfyi = Article.objects.filter(news_type='FYI').order_by('-posting_time')[:5]\t\n\treturn render_to_response('blogs/post.html', {'blog': blog, 'post': post, 'posts': posts, 'fyi': fyi, 'blogs': blogs}, context_instance=RequestContext(request))", "def post_list(request):\n # Only show the posts that have been published\n posts = Post.objects.filter(date_published__isnull=False)\n return render(request,\n 'blog/post_list.html',\n {'posts': posts}\n )", "def author_posts(request, author_id):\n id = int(author_id)\n user = myUser.objects.get(user_id=id)\n if user.is_admin:\n posts = Post.objects.select_related('author').order_by('-modified')\n else:\n posts = Post.objects.select_related('author').filter(author_id=id).order_by('-modified')\n\n return render(request, 'posts/authors.html',\n {'posts': posts})", "def get_posts(self):\n return self.blog_posts.all()", "def education_post_list(request):\n posts = EducationBlogPost.objects.filter(published_date__lte=timezone.now()\n ).order_by('-published_date')\n return render(request, \"education_center/education_blogposts.html\", {'posts': posts})", "def show_blog_list():\r\n\tblog_list = Page.objects.filter(page_type=3).order_by('-created')[:4]\r\n\treturn {'blog_list': blog_list}", "def get(self, request):\n\n # recupera posts\n posts = Post.objects.filter(owner__in=request.user.owned_blogs.all())\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)", "def render_posts(self, **params):\n\n if \"user_posts\" in params:\n posts = params['user_posts']\n else:\n posts = Post.get_all()\n\n rendered_posts = \"\"\n for post in posts:\n rendered_posts += 
self.render_post(post, **params)\n\n self.render(\"blog/blog.html\", rendered_posts=rendered_posts)", "def post_detail(request, post_pk, blog_name):\n # recuperar el post\n try:\n post = Post.objects.select_related().get(pk=post_pk)\n except Post.DoesNotExist:\n return render(request, '404.html', {}, status=404)\n except Post.MultipleObjectsReturned:\n return HttpResponse(\"Existen varios posts con ese identificador\", status=300)\n\n # preparar el contexto\n context = {\n 'post': post\n }\n\n # renderizar la plantilla\n\n return render(request, 'blogs/post-detail.html', context)", "def get(self, request):\n\n # recupera blogs\n blogs = Blog.objects.filter(owner=request.user)\n\n # prepara el contexto de la plantilla\n context = {\n 'blog_objects': blogs,\n 'pon_nombre': 'ponloponlo'\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/blogs.html', context)", "def get_queryset(self):\n author = self.kwargs['author']\n target_author = get_object_or_404(Blog, author=author)\n return Blog.objects.filter(author=target_author)", "def bloggerView(request):\n blogger = request.user.blogger\n posts = blogger.post_set.all()\n paginator = Paginator(posts, 6) # Show 6 posts per page.\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n context = {\n 'blogger': blogger,\n 'posts': posts,\n 'page_obj': page_obj,\n }\n return render(request, 'blog/blogger.html', context)", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('post_content')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if post and user and subject and content:\n if post.submitter_id == user_id:\n self.render_editpage(user, post_id, subject, content)\n else:\n self.render_improper_access()\n else:\n self.error(500)", "def get_queryset(self):\r\n\r\n user = get_object_or_404(User, username=self.kwargs.get('username'))\r\n return Post.objects.filter(author=user).order_by('-date_posted')", "def display_blogcontent(request):\n\n blogposts = BlogPost.objects.all()\n template = 'blog/blog.html'\n context = {\n 'blogposts': blogposts,\n }\n\n return render(request, template, context)", "def latest_blog_posts(self, request, *args, **kwargs):\n context = self.get_context(request, *args, **kwargs)\n context[\"latest_posts\"] = MyblogDetailPage.objects.live().public()[:1] \n return render(request, \"myblog/latest_posts.html\", context)", "def bloggerVisitView(request, pk):\n blogger = Blogger.objects.get(id=pk)\n posts = blogger.post_set.all()\n paginator = Paginator(posts, 6) # Show 6 posts per page.\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n context = {\n \"blogger\": blogger,\n \"posts\": posts,\n \"page_obj\": page_obj,\n }\n return render(request, \"blog/blogger_visit.html\", context)", "def all_blogs(request):\n\n posts = Post.objects.all()\n\n context = {\n 'posts': posts\n }\n\n return render(request, 'blog/blog.html', context)", "def author_profile(request, pk):\n author = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=author).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n return render(request, 'profile.html', {\"profile\": author, 'profileposts': profileposts})", "def blog(request):\n\tlatest_posts = Post.objects.all().order_by('-created_at')\n\tpopular_posts = Post.objects.all().order_by('-views')[:5]\n\tfor post in 
latest_posts:\n\t\tpost.url = encode_url(post.title)\n\tfor popular_post in popular_posts:\n\t\tpopular_post.url = encode_url(popular_post.title)\n\treturn render(request, 'blog/blog.html', {'latest_posts': latest_posts, \n\t\t\t\t\t\t\t\t\t\t\t 'popular_posts': popular_posts})", "def is_new_for(post, user):\n return NewBlog.objects.filter(user=user, post=post)", "def __len__(self):\n return len(self._blogposts)", "def get_queryset(self):\n id = self.kwargs['pk']\n target_author=get_object_or_404(Author, pk = id)\n return Post.objects.filter(author=target_author)", "def get_recent_posts(self, request, count):\n if request.has_permission('edit'):\n return DBSession.query(Post).filter_by(blog=self).order_by('created desc').slice(0, count).all()\n else:\n return DBSession.query(Post).filter_by(blog=self, published=True).order_by('created desc').slice(0, count).all()", "def get_permisson_object(self):\n return self.blog", "def get_permisson_object(self):\n return self.blog" ]
[ "0.6633275", "0.64074415", "0.62881434", "0.623036", "0.6202774", "0.6185763", "0.61564267", "0.61334616", "0.60681987", "0.6010772", "0.6004283", "0.599102", "0.59621155", "0.59385824", "0.5935328", "0.5891563", "0.5883217", "0.58808655", "0.58712953", "0.5863682", "0.58603257", "0.5834271", "0.58299804", "0.5798462", "0.578055", "0.5778603", "0.57770884", "0.57689923", "0.5743141", "0.5743141" ]
0.7080314
0
A person template page without attached person should show an error banner explaining to the user that he/she is misusing the template.
def test_template_person_detail_without_person(self): page = PageFactory( template="courses/cms/person_detail.html", title__language="en", should_publish=True, ) with self.assertTemplateUsed( "courses/cms/fragment_error_detail_template_banner.html" ): response = self.client.get(page.get_absolute_url()) self.assertEqual(response.status_code, 200) self.assertContains( response, ( '<div class="banner banner--error banner--rounded" role="alert">' '<svg class="banner__icon" aria-hidden="true"><use href="#icon-cross" /></svg>' '<p class="banner__message">' "A person object is missing on this person page. " "Please select another page template." "<br />" "If what you need is a person page, you need to create it " 'via the wizard and choose "New person page".' "</p>" "</div>" ), html=True, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errorview(request):\n \n #return a page indicating an error has occured\n return render(request, 'SmellGuessTemplate/error.html')", "def page_not_found(error):\r\n return render_template('error_template.html' , title = \"404 bud\", \r\n \t\t\t\t\t\t\t\t\t\t\t\tmessage = \"Time to make the chimi-fuckin'-changas. \",\r\n \t\t\t\t\t\t\t\t\t\t\t\tsubline = \"404, not there\", \r\n \t\t\t\t\t\t\t\t\t\t\t\timage_location = url_for('static', filename = 'images/deadpool-funny.jpg') ), 404", "def page_not_found(er): \n return render_template('errors.html'), 400", "def page_not_found(er):\n return render_template('errors.html'), 500", "def page_error(e):\n\n return render_template('404.html')", "def _page_not_found():\n return render_template(\n \"error.html\",\n title=\"Page Not Found\"\n ), 404", "def error():\n return render_template(\"404.html\")", "def error_page(e):\n \n return render_template('error-page.html'), 404", "def page_not_found(er):\n return render_template('errors.html'), 404", "def page_not_found():\n return render_template(\"errors/404.html\"), 404", "def bad_request(error):\r\n\treturn render_template('error_template.html' , title = \"Aaaah ...\", \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tmessage = \"나는 이해하지 못한다.\",\r\n \t\t\t\t\t\t\t\t\t\t\t\tsubline = \"Yeah, the server couldn't understand what you asked for, probably because you didn't give a choice of download.\", \r\n \t\t\t\t\t\t\t\t\t\t\t\timage_location = url_for('static', filename = 'images/simpson-gangam.jpg')), 400", "def page_not_found(error):\n return render_template('error.html', error_msg=\"404 Page Not Found\", pagetitle=\"404 Page Not Found\"), 404", "def render_mainpageerror(errormsg) -> 'html':\n return render_template('mainpage.html',\n title='Word association',\n analyse=errormsg)", "def test_nosuch_detail(self):\n\t\tresponse = self.client.get(\"/post/2/\")\n\t\tself.assertEqual(response.status_code, 404)\n\t\t# We got an error before trying to use a template,\n\t\t# so no template was accessed\n\t\t###print(f\"@@@ {response}\")\n\t\t###print(f\"@@@ @@@ template name: {response.template_name}\")\n\t\t###self.assertTemplateUsed(response, None)\n\t\tself.assertFalse(hasattr(response, \"template_name\"))", "def page_not_found(error):\n return render_template(\"page_not_found.html\"), 404", "def error():\n return render_template(\"error.html\", **locals())", "def page_not_found(error):\n\n return render_template('/errors/404.html'), 404", "def testNonExistantTemplate(self):\n try:\n self.service.get_template(\"Non_Existant\")\n self.fail(\"No ServiceError raised by non-existant template\")\n except ServiceError as ex:\n self.assertEqual(\n ex.message,\n \"There is no template called 'Non_Existant' at this service\")", "def page_not_found(e):\n return render_template('404.html')", "def page_not_found(e):\n return render_template(\"404.html\", page_title=404)", "def page_not_found(e):\n return render_template(\"500.html\"), 500", "def page_not_found(_error):\n return render_template('404.html'), 404", "def not_found_error(error):\n return render_template('errors/404.html'), 404", "def page_not_found(e):\n return render_template(\"404.html\"), 404", "def err404():\n return render_template('404.html', year=datetime.now().year)", "def page_not_found(e):\n return render_template('404.html'), 404", "def page_not_found(e):\n return render_template('404.html'), 404", "def not_found(error):\n\n return render_template('errors/404.html'), 404", "def page_not_found(e):\n\n return render_template('404.html'), 404", "def 
page_not_found(e):\n return render_template(\"error/404.html\"), 404" ]
[ "0.6937663", "0.6863008", "0.68516415", "0.6752877", "0.6686156", "0.66346717", "0.6599722", "0.65577775", "0.6542976", "0.650337", "0.6459382", "0.6441446", "0.64403677", "0.64176124", "0.63976896", "0.63921285", "0.63720983", "0.63401914", "0.63311446", "0.626568", "0.62625873", "0.6227375", "0.62139726", "0.6195414", "0.6194705", "0.6184436", "0.6184436", "0.6183302", "0.61819506", "0.61724174" ]
0.7322566
0
The person meta description should show meta_description placeholder if defined
def test_templates_person_detail_meta_description(self): person = PersonFactory() page = person.extended_object title_obj = page.get_title_obj(language="en") title_obj.meta_description = "A custom description of the person" title_obj.save() page.publish("en") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertContains( response, '<meta name="description" content="A custom description of the person" />', )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_templates_person_detail_meta_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n '<meta name=\"description\"',\n )", "def _set_description(\n meta: Dict, description: Optional[Union[str, bool]] = None, **kwargs\n) -> Dict:\n if description is False or description is None:\n show_description_value = MetaWidget.DESCRIPTION_OPTION_NOTHING\n description = \"\"\n elif isinstance(description, str):\n show_description_value = MetaWidget.DESCRIPTION_OPTION_CUSTOM\n else:\n raise IllegalArgumentError(\n \"When using the add_card_widget or add_service_card_widget, 'description' must be \"\n \"'text_type' or None or False. Type is: {}\".format(type(description))\n )\n meta.update(\n {\n MetaWidget.SHOW_DESCRIPTION_VALUE: show_description_value,\n MetaWidget.CUSTOM_DESCRIPTION: description,\n }\n )\n return meta", "def test_templates_person_detail_meta_description_bio(self):\n person = PersonFactory()\n page = person.extended_object\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\"A biographic description of the person\",\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A biographic description of the person\" />',\n )", "def test_templates_person_detail_meta_description_bio_exceeds_max_length(self):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. 
\"\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:160]\n self.assertContains(\n response,\n f'<meta name=\"description\" content=\"{cut}\" />',\n )", "def test_templates_person_detail_open_graph_description_bio(self):\n person = PersonFactory()\n page = person.extended_object\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\"A biographic description of the person\",\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta property=\"og:description\" content=\"A biographic description of the person\" />',\n )", "def get_meta_description(self):\n md = self.meta_description.replace(\"<title>\", self.title)\n return md.replace(\"<short-text>\", self.short_text)", "def get_meta_description(self, article):\r\n return self.get_meta_content(article.doc, \"meta[name=description]\")", "def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc", "def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding empty descriptions to root')\n self.data['description'] = ''", "def testDescription(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"description\")\n\n self.util.stringPropertyTest(self, dis_meta, \"description\")", "async def _forcedescription(self, ctx, *args):\n if len(args) < 2:\n await ctx.send(\"Include both a name and a description!\")\n return\n\n god = database.getGodName(args[0], ctx.guild.id)\n\n if god:\n desc = \"\"\n i = 1\n for arg in args:\n if i > 1:\n desc = desc + \" \" + arg\n i += 1\n desc.strip()\n\n if len(desc) > 100:\n await ctx.send(\"Keep the description under 100 chars, please.\")\n return\n\n database.setDesc(god.ID, desc)\n await ctx.send(\"Description set successfully!\")\n else:\n await ctx.send(\"No God found by that name!\")", "def get_meta_description(self):\n if self.meta_description:\n return self.meta_description\n # Extra strip() removes newlines, which can break some parsers\n desc = strip_tags(self.excerpt_html).strip()\n return desc", "def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):\n return self.get_title_obj_attribute(\"meta_description\", language, fallback, version_id, force_reload)", "def test_templates_person_detail_open_graph_description_bio_exceeds_max_length(\n self,\n ):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. 
\" * 7\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:200]\n self.assertContains(\n response,\n f'<meta property=\"og:description\" content=\"{cut}\" />',\n )", "def _parse_yaml_description(self, meta: Mapping):\n with contextlib.suppress(Exception):\n yaml_md = yaml.safe_load(meta[\"description\"])\n # YAML could be anything: plain string, list, …\n if isinstance(yaml_md, dict):\n meta.pop(\"description\")\n meta.update(yaml_md)", "def _description(self):\n return None", "def _set_description(self):\n if self._report_key == ReportTypes.MHR_REGISTRATION:\n description = self._report_data.get('description')\n if description and description.get('rebuiltRemarks'):\n description['rebuiltRemarks'] = markupsafe.Markup(description['rebuiltRemarks'])\n if description and description.get('otherRemarks'):\n description['otherRemarks'] = markupsafe.Markup(description['otherRemarks'])", "def getMetaDescription(self, article):\n return self.getMetaContent(article.doc, \"meta[name=description]\")", "def description():", "def description(self, newDescription=None):\n pass", "def description(self, description):\n\n self._set_field(\"description\", description)", "def add_meta(self, post, *args, **kwargs):\n\t\tsuper(Command, self).add_meta(post, *args, **kwargs)\n\t\tpost.gen_description = False\n\t\tpost.description = description_from_content(post)\n\t\tpost.save()", "def get_description(self):", "def test_templates_person_detail_open_graph_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n \"og:description\",\n )", "def description(self, value):\n self._update_values('description', value)", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")" ]
[ "0.7493067", "0.73539066", "0.7327169", "0.6953486", "0.6882343", "0.68740034", "0.6779088", "0.6736526", "0.6641838", "0.66232705", "0.66081226", "0.65941656", "0.65928084", "0.65557325", "0.6542866", "0.65394235", "0.65185213", "0.6508125", "0.64476067", "0.64332944", "0.6432259", "0.63824165", "0.6369505", "0.6356088", "0.63408005", "0.63384324", "0.63384324", "0.63384324", "0.63384324", "0.63384324" ]
0.747935
1
The person meta description should show the bio if no meta_description is specified
def test_templates_person_detail_meta_description_bio(self): person = PersonFactory() page = person.extended_object # Add a bio to a person placeholder = person.extended_object.placeholders.get(slot="bio") add_plugin( language="en", placeholder=placeholder, plugin_type="PlainTextPlugin", body="A biographic description of the person", ) page.publish("en") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertContains( response, '<meta name="description" content="A biographic description of the person" />', )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_templates_person_detail_meta_description_bio_exceeds_max_length(self):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:160]\n self.assertContains(\n response,\n f'<meta name=\"description\" content=\"{cut}\" />',\n )", "def test_templates_person_detail_meta_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n '<meta name=\"description\"',\n )", "def test_templates_person_detail_open_graph_description_bio(self):\n person = PersonFactory()\n page = person.extended_object\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\"A biographic description of the person\",\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta property=\"og:description\" content=\"A biographic description of the person\" />',\n )", "def test_templates_person_detail_meta_description(self):\n person = PersonFactory()\n page = person.extended_object\n\n title_obj = page.get_title_obj(language=\"en\")\n title_obj.meta_description = \"A custom description of the person\"\n title_obj.save()\n\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A custom description of the person\" />',\n )", "def test_templates_person_detail_open_graph_description_bio_exceeds_max_length(\n self,\n ):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. 
\" * 7\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:200]\n self.assertContains(\n response,\n f'<meta property=\"og:description\" content=\"{cut}\" />',\n )", "def get_meta_description(self, article):\r\n return self.get_meta_content(article.doc, \"meta[name=description]\")", "def get_meta_description(self):\n if self.meta_description:\n return self.meta_description\n # Extra strip() removes newlines, which can break some parsers\n desc = strip_tags(self.excerpt_html).strip()\n return desc", "def get_meta_description(self):\n md = self.meta_description.replace(\"<title>\", self.title)\n return md.replace(\"<short-text>\", self.short_text)", "async def profile_description(self, ctx):\n profile = await self.cache.get_profile(ctx.author.id)\n embed = self.bot.theme.embeds.primary()\n embed.set_author(name=f\"{ctx.author.name}'s Profile Description\", icon_url=ctx.author.avatar_url)\n embed.description = profile.description\n await ctx.send(embed=embed)", "def _set_description(\n meta: Dict, description: Optional[Union[str, bool]] = None, **kwargs\n) -> Dict:\n if description is False or description is None:\n show_description_value = MetaWidget.DESCRIPTION_OPTION_NOTHING\n description = \"\"\n elif isinstance(description, str):\n show_description_value = MetaWidget.DESCRIPTION_OPTION_CUSTOM\n else:\n raise IllegalArgumentError(\n \"When using the add_card_widget or add_service_card_widget, 'description' must be \"\n \"'text_type' or None or False. 
Type is: {}\".format(type(description))\n )\n meta.update(\n {\n MetaWidget.SHOW_DESCRIPTION_VALUE: show_description_value,\n MetaWidget.CUSTOM_DESCRIPTION: description,\n }\n )\n return meta", "def getMetaDescription(self, article):\n return self.getMetaContent(article.doc, \"meta[name=description]\")", "async def _forcedescription(self, ctx, *args):\n if len(args) < 2:\n await ctx.send(\"Include both a name and a description!\")\n return\n\n god = database.getGodName(args[0], ctx.guild.id)\n\n if god:\n desc = \"\"\n i = 1\n for arg in args:\n if i > 1:\n desc = desc + \" \" + arg\n i += 1\n desc.strip()\n\n if len(desc) > 100:\n await ctx.send(\"Keep the description under 100 chars, please.\")\n return\n\n database.setDesc(god.ID, desc)\n await ctx.send(\"Description set successfully!\")\n else:\n await ctx.send(\"No God found by that name!\")", "def render_biography(self):\n return markdown2.markdown(self.biography)", "def biography_file(self, instance):\r\n try:\r\n return admin_display_file(instance.user.user_files, 'biography')\r\n except Exception:\r\n return mark_safe('<i class=\"fa fa-times-circle red\" aria-hidden=\"true\"></i>')", "def test_templates_person_detail_open_graph_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n \"og:description\",\n )", "def get_webpage_description(self, response):\n desc = response.xpath('//*/meta[@itemprop=\"description\"]/@content').extract_first()\n desc1 = response.xpath('//*/meta[@name=\"description\"]/@content').extract_first()\n desc_length = 50\n if desc1:\n return desc1[:desc_length].strip()\n else:\n if desc:\n return desc[:desc_length].strip()\n else:\n desc = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if desc:\n return desc[:desc_length].strip()\n else:\n return \"\"", "async def description(ctx, bot: typing.Union[discord.Member, discord.User]):\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n if not bot.bot:\n return await r(ctx, \"Not a bot.\")\n\n if len(data[\"Big_desc\"]) > 2000:\n desc = data[\"Big_desc\"][:2000] + \\\n f\"...\\n[View original page for full description](https://www.motiondevelopment.top/bots/{bot.id})\"\n else:\n desc = data[\"Big_desc\"]\n await em(ctx, embed=discord.Embed(color=0xfecdea, description=desc))", "def testDescription(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"description\")\n\n self.util.stringPropertyTest(self, dis_meta, \"description\")", "def get_description(self):", "def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):\n return self.get_title_obj_attribute(\"meta_description\", language, fallback, version_id, force_reload)", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, 
\"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")" ]
[ "0.75098807", "0.73679626", "0.7329358", "0.7161595", "0.7068236", "0.6558661", "0.6484242", "0.6474118", "0.6419864", "0.6413481", "0.63787854", "0.62688863", "0.6268348", "0.62623376", "0.6260252", "0.62513417", "0.62280905", "0.61857986", "0.6161864", "0.61527085", "0.6134955", "0.6134955", "0.6134955", "0.6134955", "0.6134955", "0.6134955", "0.6134955", "0.6134955", "0.6134955", "0.6134955" ]
0.7804819
0
The person meta description should be cut if it exceeds 160 characters
def test_templates_person_detail_meta_description_bio_exceeds_max_length(self): person = PersonFactory() page = person.extended_object placeholder_value = ( "Long description that describes the page with a summary. " "Long description that describes the page with a summary. " "Long description that describes the page with a summary. " ) # Add a bio to a person placeholder = person.extended_object.placeholders.get(slot="bio") add_plugin( language="en", placeholder=placeholder, plugin_type="PlainTextPlugin", body=placeholder_value, ) page.publish("en") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertEqual(response.status_code, 200) cut = placeholder_value[0:160] self.assertContains( response, f'<meta name="description" content="{cut}" />', )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_description(self):\n description = self.cleaned_data['description']\n if not re.match(r'[\\w{4}\\s*]+', description) or len(description) < 10:\n v_err('no_desc')\n return description", "def clean_bio(self):\n bio = BeautifulSoup(self.cleaned_data['bio'], \"html.parser\")\n char_num = len(bio.get_text().replace(' ', ''))\n print(char_num)\n if 0 < char_num < 10:\n raise forms.ValidationError('If you want to share bio, make it '\n '10 characters or longer')\n return self.cleaned_data['bio']", "def test_templates_person_detail_open_graph_description_bio_exceeds_max_length(\n self,\n ):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \" * 7\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:200]\n self.assertContains(\n response,\n f'<meta property=\"og:description\" content=\"{cut}\" />',\n )", "def _shortened_description(self):\n limit = 150\n for ticket in self:\n d = html.fromstring((ticket.description or '').strip()\n or '&nbsp;').text_content()\n ticket.shortened_description = (d[:limit] + u'(…)'\n if len(d) > limit else d)", "def truncate_description(description):\n if len(description) <= 160 :\n return description\n\n cut_desc = \"\"\n character_counter = 0\n for i, letter in enumerate(description) :\n character_counter += 1\n if character_counter > 160 :\n if letter == ' ' :\n return cut_desc+\"...\"\n else :\n return cut_desc.rsplit(' ',1)[0]+\"...\"\n cut_desc += description[i]\n return cut_desc", "def __str__(self):\n len_title=75\n if len(self.description)>len_title:\n titlestring=self.description[:len_title] + '...'\n else:\n titlestring=self.description\n return titlestring", "def _check_description_count(self):\n\n for rec in self:\n if rec.description and len(rec.description)>50:\n raise except_orm(_('Warning!'),\n _(\"Description Lenght must be less than or equal to 50. 
\"))", "def chunk_long_description(desc: str, col_width: int = 120) -> str:\n desc_chunks = []\n chunk = \"\"\n for word in desc.split():\n new_chunk = f\"{chunk}{word.strip()} \"\n if len(new_chunk) >= col_width:\n desc_chunks.append(new_chunk)\n chunk = \"\"\n else:\n chunk = new_chunk\n\n delim = \"\\n\"\n desc_chunks.append(chunk)\n\n return delim.join(desc_chunks)", "def restrict_text(text):\n\n if len(text) > 400:\n\n return text[:400] + ' ...'\n \n return text", "def poem(desc: Any) -> str:\n desc = str(desc)\n\n if len(desc) < 23:\n return desc + ' ' * (23 - len(desc))\n else:\n return desc[:20] + '...'", "def large_text(self):\n pass", "def test_description_is_generated_from_long_desc_truncates(self):\r\n self.register()\r\n res = self.new_application(long_description=\"a\"*300)\r\n\r\n app = db.session.query(App).first()\r\n assert len(app.description) == 255, len(app.description)\r\n assert app.description[-3:] == '...'", "def truncate_title(title):\n return title if len(title) <= 70 else title[:70]+\"...\"", "def shortened_text(self, max_chars=50):\n if len(self.comment) > max_chars:\n return self.comment[:max_chars] + \"...\"\n else:\n return self.comment", "def __str__(self):\n if len(self.title) > 50:\n return self.title[:50] + \"...\"\n else:\n return self.title", "def test_long_description(question):\n assert \"description\" in question[\"instance\"]\n assert \"longDescription\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n longDescription = question[\"instance\"][\"longDescription\"]\n # there shouldn't be whitespace at the beginning or end\n assert longDescription.strip() == longDescription\n words = longDescription.split()\n # we should have at least five words\n assert len(words) >= 5\n # the first letter should be capitalized\n assert longDescription[0].isupper()\n # long description should end with a period\n assert longDescription.endswith(\".\")\n # long description should not have two periods at the end\n assert not longDescription.endswith(\"..\")\n # description should not be the same as long description\n assert longDescription != description", "def get_webpage_description(self, response):\n desc = response.xpath('//*/meta[@itemprop=\"description\"]/@content').extract_first()\n desc1 = response.xpath('//*/meta[@name=\"description\"]/@content').extract_first()\n desc_length = 50\n if desc1:\n return desc1[:desc_length].strip()\n else:\n if desc:\n return desc[:desc_length].strip()\n else:\n desc = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if desc:\n return desc[:desc_length].strip()\n else:\n return \"\"", "def shortened_notes_text(self, max_chars=50):\n if len(self.notes) > max_chars:\n return self.notes[:max_chars] + \"...\"\n else:\n return self.notes", "def validateDescription(description):\n \n if not(description) or len(description.split()) < 5:\n return \"You must supply a description of at least 5 words.\"", "def shortened_text(self, max_chars=50):\n if len(self.text) > max_chars:\n return self.text[:max_chars] + \"...\"\n else:\n return self.text", "def remove_longer_words(text):\n return \" \".join([word for word in str(text).split() if len(word) <= 12])", "def short_description(self):\n description = self.description\n if description is not None:\n lines = description.splitlines()\n title = []\n for line in lines:\n line = line.strip()\n if line == \"\":\n if len(title) > 0:\n break\n else:\n title.append(line)\n description = \"\\n\".join(textwrap.wrap(\"\\n\".join(title), 
80))\n\n return description", "def short_title_chars(self):\r\n if len(self.title) > settings.CAMPAIGN_SHORT_TITLE_LENGTH:\r\n return mark_safe(u'%s&hellip;' % self.title[:settings.CAMPAIGN_SHORT_TITLE_LENGTH])\r\n else:\r\n return self.title", "def short_comment(self):\n return self.comment if len(self.comment) <= 500 else self.comment[:497] + '...'", "def trim_display_field(self, value, max_length):\n if not value:\n return ''\n if len(value) > max_length:\n return value[:max_length - 3] + '...'\n return value", "def test_too_short_description(superuser):\n form = RegisterForm(superuser, name='Client',\n description='C',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('Field must be between 3 and 350 characters long.') in form.description.errors", "def clean_user_desc(self):\n desc = self.data['user']['description']\n if desc is not None:\n desc = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", desc).split())\n desc = \" \".join(re.sub(\"(#\\S+)\", ' ', desc).split())\n desc = ''.join(list(filter(lambda x: x.isalpha() or x is ' ',\n desc))).replace(' ', ' ').replace(' ', ' ').lower().strip()\n return {'plain_desc': desc}", "def abrevia(self, summary, sumlenght):\n bb = ''\n\n if sumlenght < len(summary):\n bb = summary[:sumlenght]\n\n lastspace = bb.rfind(' ')\n cutter = lastspace\n precut = bb[0:cutter]\n\n if precut.count('<b>') > precut.count('</b>'):\n cutter = summary.find('</b>', lastspace) + 4\n bb = summary[0:cutter]\n if precut.count('<strong>') > precut.count('</strong>'):\n cutter = summary.find('</strong>', lastspace) + 9\n bb = summary[0:cutter]\n\n if bb.count('<p') > precut.count('</p'):\n bb += '...</p>'\n else:\n bb = bb + '...'\n else:\n bb = summary\n\n return bb", "def short_title_wrapped_for_badge(self):\r\n chars_per_line = self._badge_chars_per_line(self.title, settings.CAMPAIGN_BADGE_TITLE_FONT, settings.CAMPAIGN_BADGE_TITLE_FONT_SIZE)\r\n max_lines = settings.CAMPAIGN_BADGE_TITLE_MAX_LINES\r\n if len(self.title) > chars_per_line:\r\n wrapped = textwrap.wrap(self.title, chars_per_line)\r\n ellipsis_needed = len(wrapped) > max_lines\r\n wrapped = wrapped[:max_lines]\r\n if ellipsis_needed or (wrapped and (len(wrapped) == max_lines) and (len(wrapped[max_lines - 1]) > chars_per_line)):\r\n wrapped[max_lines - 1] = u'%s...' % wrapped[max_lines - 1][:-3]\r\n return wrapped\r\n else:\r\n return [self.title]", "def __str__(self):\n return str(self.description)[:10]" ]
[ "0.70148104", "0.7013429", "0.70095193", "0.69248194", "0.69162107", "0.66320646", "0.63486636", "0.6343417", "0.63398165", "0.6330294", "0.6316465", "0.63015544", "0.62774426", "0.6244998", "0.62317955", "0.62015456", "0.6111285", "0.60665274", "0.60504776", "0.6047837", "0.60343695", "0.59667665", "0.595058", "0.5910764", "0.5891507", "0.5869854", "0.58601916", "0.5856773", "0.582449", "0.5800107" ]
0.7297463
0
The person meta description should not be present if neither the meta_description field on the page nor the `bio` placeholder is filled
def test_templates_person_detail_meta_description_empty(self): person = PersonFactory() page = person.extended_object page.publish("en") url = person.extended_object.get_absolute_url() response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertNotContains( response, '<meta name="description"', )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_templates_person_detail_meta_description_bio_exceeds_max_length(self):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n \"Long description that describes the page with a summary. \"\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:160]\n self.assertContains(\n response,\n f'<meta name=\"description\" content=\"{cut}\" />',\n )", "def test_templates_person_detail_meta_description_bio(self):\n person = PersonFactory()\n page = person.extended_object\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\"A biographic description of the person\",\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A biographic description of the person\" />',\n )", "def test_templates_person_detail_open_graph_description_bio_exceeds_max_length(\n self,\n ):\n person = PersonFactory()\n page = person.extended_object\n placeholder_value = (\n \"Long description that describes the page with a summary. \" * 7\n )\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=placeholder_value,\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n cut = placeholder_value[0:200]\n self.assertContains(\n response,\n f'<meta property=\"og:description\" content=\"{cut}\" />',\n )", "def test_templates_person_detail_open_graph_description_bio(self):\n person = PersonFactory()\n page = person.extended_object\n\n # Add a bio to a person\n placeholder = person.extended_object.placeholders.get(slot=\"bio\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\"A biographic description of the person\",\n )\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta property=\"og:description\" content=\"A biographic description of the person\" />',\n )", "def test_templates_person_detail_meta_description(self):\n person = PersonFactory()\n page = person.extended_object\n\n title_obj = page.get_title_obj(language=\"en\")\n title_obj.meta_description = \"A custom description of the person\"\n title_obj.save()\n\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A custom description of the person\" />',\n )", "def test_missing_description(superuser):\n form = 
RegisterForm(superuser, name='Client',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('This field is required.') in form.description.errors", "def test_templates_person_detail_open_graph_description_empty(self):\n person = PersonFactory()\n page = person.extended_object\n page.publish(\"en\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertNotContains(\n response,\n \"og:description\",\n )", "def _set_description(\n meta: Dict, description: Optional[Union[str, bool]] = None, **kwargs\n) -> Dict:\n if description is False or description is None:\n show_description_value = MetaWidget.DESCRIPTION_OPTION_NOTHING\n description = \"\"\n elif isinstance(description, str):\n show_description_value = MetaWidget.DESCRIPTION_OPTION_CUSTOM\n else:\n raise IllegalArgumentError(\n \"When using the add_card_widget or add_service_card_widget, 'description' must be \"\n \"'text_type' or None or False. Type is: {}\".format(type(description))\n )\n meta.update(\n {\n MetaWidget.SHOW_DESCRIPTION_VALUE: show_description_value,\n MetaWidget.CUSTOM_DESCRIPTION: description,\n }\n )\n return meta", "def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")", "async def _forcedescription(self, ctx, *args):\n if len(args) < 2:\n await ctx.send(\"Include both a name and a description!\")\n return\n\n god = database.getGodName(args[0], ctx.guild.id)\n\n if god:\n desc = \"\"\n i = 1\n for arg in args:\n if i > 1:\n desc = desc + \" \" + arg\n i += 1\n desc.strip()\n\n if len(desc) > 100:\n await ctx.send(\"Keep the description under 100 chars, please.\")\n return\n\n database.setDesc(god.ID, desc)\n await ctx.send(\"Description set successfully!\")\n else:\n await ctx.send(\"No God found by that name!\")", "def test_form_field_has_correct_placeholder(self):\n\n self.assertEqual(\n self.form.fields[\"description\"].widget.attrs[\"placeholder\"],\n \"Company description\",\n )", "def test_template_person_detail_without_person(self):\n page = PageFactory(\n template=\"courses/cms/person_detail.html\",\n title__language=\"en\",\n should_publish=True,\n )\n\n with self.assertTemplateUsed(\n \"courses/cms/fragment_error_detail_template_banner.html\"\n ):\n response = self.client.get(page.get_absolute_url())\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response,\n (\n '<div class=\"banner banner--error banner--rounded\" role=\"alert\">'\n '<svg class=\"banner__icon\" aria-hidden=\"true\"><use href=\"#icon-cross\" /></svg>'\n '<p class=\"banner__message\">'\n \"A person object is missing on this person page. 
\"\n \"Please select another page template.\"\n \"<br />\"\n \"If what you need is a person page, you need to create it \"\n 'via the wizard and choose \"New person page\".'\n \"</p>\"\n \"</div>\"\n ),\n html=True,\n )", "def get_meta_description(self):\n if self.meta_description:\n return self.meta_description\n # Extra strip() removes newlines, which can break some parsers\n desc = strip_tags(self.excerpt_html).strip()\n return desc", "def get_meta_description(self, article):\r\n return self.get_meta_content(article.doc, \"meta[name=description]\")", "def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")", "def get_webpage_description(self, response):\n desc = response.xpath('//*/meta[@itemprop=\"description\"]/@content').extract_first()\n desc1 = response.xpath('//*/meta[@name=\"description\"]/@content').extract_first()\n desc_length = 50\n if desc1:\n return desc1[:desc_length].strip()\n else:\n if desc:\n return desc[:desc_length].strip()\n else:\n desc = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if desc:\n return desc[:desc_length].strip()\n else:\n return \"\"", "def _parse_yaml_description(self, meta: Mapping):\n with contextlib.suppress(Exception):\n yaml_md = yaml.safe_load(meta[\"description\"])\n # YAML could be anything: plain string, list, …\n if isinstance(yaml_md, dict):\n meta.pop(\"description\")\n meta.update(yaml_md)", "def testDescription(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"description\")\n\n self.util.stringPropertyTest(self, dis_meta, \"description\")", "def test_no_markup_type_field_if_set(self):\r\n self.assertTrue('markdown_field_markup_type' not in\r\n ArticleForm().fields.keys())", "def test_fb_meta_no_coupon(self):\n site = Site.objects.get(id=2)\n fb_meta_dict = build_fb_like_meta(site)\n self.assertEqual(fb_meta_dict['ref'], site.domain)\n self.assertTrue('Save money at the best' in fb_meta_dict['description'])\n self.assertTrue('get started today!' in fb_meta_dict['description'])\n self.assertEqual('Sign up for Hudson Valley Coupons.', \n fb_meta_dict['title'])", "def testDescription(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(place.description, \"\")", "def test_null_field(self):\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(problem.markdown)\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'nullout': ['markdown']}\r\n )\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNone(problem.markdown)", "def save(self, *args, **kwargs):\n if not self.protect_html:\n content_html, excerpt_html = self.generate_html()\n self.content_html = content_html\n self.excerpt_html = excerpt_html\n # Ensure that if meta_description appears empty to the user then it is\n # saved as an empty field. 
Prevents accidentally-inserted whitespace\n # from causing a blank description.\n self.meta_description = self.meta_description.strip()\n return super(Article, self).save(*args, **kwargs)", "def getMetaDescription(self, article):\n return self.getMetaContent(article.doc, \"meta[name=description]\")", "def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding empty descriptions to root')\n self.data['description'] = ''", "def clean_description(self):\n description = self.cleaned_data['description']\n if not re.match(r'[\\w{4}\\s*]+', description) or len(description) < 10:\n v_err('no_desc')\n return description", "def clean_user_desc(self):\n desc = self.data['user']['description']\n if desc is not None:\n desc = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", desc).split())\n desc = \" \".join(re.sub(\"(#\\S+)\", ' ', desc).split())\n desc = ''.join(list(filter(lambda x: x.isalpha() or x is ' ',\n desc))).replace(' ', ' ').replace(' ', ' ').lower().strip()\n return {'plain_desc': desc}", "async def set_profile_description(self, ctx, *, description: str):\n max_words = self.plugin.data.profile.max_description_length\n if len(description) > max_words:\n res = f\"{ctx.emotes.web_emotion.xx} Sorry but profile description cannot exceed {max_words} word limit.\"\n return await ctx.send_line(res)\n profile = await self.cache.get_profile(ctx.author.id)\n await profile.set_description(description)\n embed = self.bot.theme.embeds.primary(title=\"✅ Your Profile Description has been updated to:\")\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.description = profile.description\n await ctx.send(\"\", embed=embed)", "def test_form_field_has_correct_placeholder(self):\n\n self.assertEqual(\n self.form.fields[\"flavour\"].widget.attrs[\"placeholder\"],\n \"Flavour profile\",\n )", "def test_too_short_description(superuser):\n form = RegisterForm(superuser, name='Client',\n description='C',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('Field must be between 3 and 350 characters long.') in form.description.errors" ]
[ "0.7431924", "0.7200077", "0.69734985", "0.6669384", "0.661611", "0.64824736", "0.63739985", "0.6300522", "0.6011192", "0.59699476", "0.5845052", "0.58380896", "0.58326775", "0.5792924", "0.5760811", "0.5732031", "0.5725326", "0.567543", "0.563398", "0.55857897", "0.5585393", "0.55821663", "0.5572202", "0.5544977", "0.5543813", "0.549803", "0.54950017", "0.5470219", "0.54700685", "0.5463154" ]
0.7527318
0
This function lists neighbour entries attached to the given device
def showNeighboursByDevice(logger, device, version=None): args = [] if version: if version == 4: args.append(IpConstant.IPV4) elif version == 6: args.append(IpConstant.IPV6) args += [IpOption.NEIGHBOUR, IpAction.SHOW, IpConstant.DEV, device] option = args[0] cmd = args[1:] rc = Command.executeIp(logger, option, *cmd) return rc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def getNeighbourTable(logger, device, version=4):\n\n (rc, stdout, stderr) = IpNeighbour.showNeighboursByDevice(logger, device, version)\n\n if rc != 0:\n return None\n\n if logger:\n logger(\"neigh-table-get\").debug2(\"retrieving device %s neighbor table\", device, stdout)\n\n # example:\n # ipv4 --> 199.120.69.13 dev eth-tg8 lladdr 00:0d:66:33:dc:19 REACHABLE\n # ipv6 --> 2000::223:5eff:fe2d:cf81 dev eth-tg8 lladdr 00:23:5e:2d:cf:81 router STALE\n output = stdout.splitlines()\n\n return output", "def neighbors(lines, of):\n return [k for k, line in enumerate(lines) if line.touches(of)]", "def device_lldp_neighbors(device):\n show_file = device.show_outputs_dir.joinpath(\n 'show-lldp-neighbors.json')\n\n return json.load(show_file.open())", "def get_neighbours(self):\n return []", "def process(self, device, results, log):\n log.info('processing %s for device %s', self.name(), device.id)\n getdata, tabledata = results\n \n ltmnode_table = tabledata.get(\"ltmNodeAddrTable\")\n \n # Grab the second table and append it to the first\n status_table = tabledata.get(\"ltmNodeStatusTable\")\n for oid, data in status_table.items():\n for key, value in data.items():\n if key not in ltmnode_table[oid]:\n ltmnode_table[oid][key] = value\n \n maps = []\n rm = self.relMap()\n # Get the list of name patterns to search for\n node_name_filter = getattr(device, 'zF5BigipNodesNameFilter', None)\n log.debug(\"Picked up Filter List of: %s\" , node_name_filter)\n for oid, data in ltmnode_table.items():\n # log.debug(\"%s : %s\\n\", oid, data)\n #\n om = self.objectMap(data)\n binclude = True\n if node_name_filter != None and node_name_filter != \"\":\n # If there is a regex filter supplied, lets use it\n if re.search(node_name_filter, om.ltmNodeAddrScreenName) == None:\n binclude = False\n if binclude == True:\n # The value fetched is a packed hex representation of the IP\n # Try and unpack the address, and check if route_domains\n # are in use\n address, route_domain = unpack_address_to_string(oid, \n om.ltmNodeAddrAddr)\n if address != \"\":\n om.ltmNodeAddrAddr = address\n if route_domain != \"\":\n om.ltmNodeAddrRouteDomain = route_domain\n om.id = self.prepId(om.ltmNodeAddrAddr)\n om.snmpindex = oid\n\n om.ltmNodeAddrStatusEnabledState = \\\n enable_state_values[om.ltmNodeAddrStatusEnabledState]\n om.ltmNodeAddrStatusAvailState = \\\n avail_status_values[om.ltmNodeAddrStatusAvailState]\n rm.append(om)\n log.debug(rm)\n return [rm]", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + 
ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def pd_king_neighbors(obj: PdObject) -> List[PdObject]:\n return [neighbor for tag, neighbor in pd_king_neighbors_and_self(obj) if tag]", "def get_neighbours(self, value):\n\t\tnode = self.get_node(value)\n\t\tneighbours = [key.value for key in node.edges.keys()]\n\t\treturn neighbours", "def get_framework_neighbours(atom, useH=True):\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist", "def make_neighbor_list(self):\n nodeinfo = bytearray()\n\n # the node itself\n for item in self.neighbors.my_info.get_nodeinfo():\n nodeinfo.extend(item)\n count = 1\n\n # neighboring node\n for nd in self.neighbors.nodeinfo_list.keys():\n if self.neighbors.nodeinfo_list[nd].is_alive:\n count += 1\n for item in self.neighbors.nodeinfo_list[nd].get_nodeinfo():\n nodeinfo.extend(item)\n\n nodes = bytearray(count.to_bytes(4, 'big'))\n nodes.extend(nodeinfo)\n return bytes(nodes)", "def get_entries(self):\n return self.find_by_st(\"urn:schemas-denon-com:device:ACT-Denon:1\")", "def discovered_drones(self):\n return self.devices", "def get_neighbours(self):\n return self.points_to.keys()", "def Neighbors(vendor):\n neighbors = {\"cisco\" : \"\", \"juniper\" : \"\", \"vyatta\" : \"\" }\n cisco_neighbors = {}\n juniper_neighbors = {}\n vyatta_neighbors = {}\n while True:\n print \"***\\t\\t%s NEIGHBORS***\" % (vendor)\n n = raw_input(\"\\t\\tNeighbor information (Press any key to continue. Press 'q' to quit): \")\n if n is not 'q':\n neighbor_id = raw_input(\"\\t\\tNeighbor ID (eg. 
x.x.x.x): \")\n neighbor_as = raw_input(\"\\t\\tNeighbor AS: \")\n if vendor == \"cisco\":\n cisco_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = cisco_neighbors\n elif vendor == \"juniper\":\n juniper_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = juniper_neighbors\n else:\n vyatta_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = vyatta_neighbors\n else:\n break\n return neighbors", "def find_devices (devicelist):\n vprint(\"\\nFind known devices:\")\n for device in devicelist:\n if find_device(device) is not None :\n vprint(\"\\tFound :\", device)\n else:\n vprint(\"\\tNOT found:\", device )\n vprint(\"..........\") \n return", "def neighbors(node, topology):\n return [n for n in topology[node]]", "def getNeighbor(self, neighborID):", "def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]", "def get_neighbors(self):\n # return the neighbors\n # if len(self.neighbors.keys()) != 0:\n # # print('\\n')\n return self.neighbors.keys()\n # else:\n # # print(self.id)\n # raise ValueError('No neighbors')\n # return self.neighbors.keys()", "def get_bgp_neighbors(context, target):\n\n response = context.get_operation(\"get_bgp_neighbors\")\n neighbors = [ row[\"neighbor\"] for row in response ]\n return neighbors", "def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist", "def fetch_router_list(args):\n nd = NetDevices(production_only=opts.nonprod)\n ret = []\n blocked_groups = []\n if args:\n for arg in args:\n # Try to find the device, but fail gracefully if it can't be found\n device = device_match(arg)\n if not pass_filters(device) or device is None:\n continue\n ret.append(device)\n\n else:\n for entry in nd.itervalues():\n if entry.owningTeam in blocked_groups:\n continue\n if not pass_filters(entry):\n continue\n ret.append(entry)\n\n return sorted(ret, reverse=True)", "def findNeighbor(cur, dataList, eps):\n neighbors = []\n for pt in dataList:\n if (cur.x - pt.x) ** 2 + (cur.y - pt.y) ** 2 <= eps ** 2:\n neighbors.append(pt)\n return neighbors", "def find_neighbors(index, bridges_dict) :\n neighbors = []\n for k in bridges_dict.keys() :\n if bridges_dict[k].lumen1 == index :\n neighbors += [bridges_dict[k].lumen2]\n elif bridges_dict[k].lumen2 == index :\n neighbors += [bridges_dict[k].lumen1]\n return neighbors", "def lldp_neighbour(self, output_str):\n\n date_time = get_date_time()\n connections = []\n top = 212\n # bottom = -33\n trimmed_str = output_str[top:]\n for line in trimmed_str.split('\\n'):\n line_content = line.split()\n local_port = line_content[1] + 
line_content[2]\n remote_device = line_content[0].split('.')[0]\n remote_port = line_content[8] + line_content[9]\n connections.append([self.device, local_port.strip(), remote_device.strip(), remote_port.strip(),\n date_time])\n\n connections_df = pd.DataFrame(connections,\n columns=['local_device', 'local_port', 'remote_device', 'remote_port',\n 'date_time'])\n return connections_df", "def get_neighbors(self):\n return self.neighbors.keys()", "def element_neighbors(self):\n if self.element_neighbors_data is not None:\n return self.element_neighbors_data\n\n max_nr_edges = self.header['element_infos'][0, 2]\n\n # initialize the neighbor array\n self.element_neighbors_data = []\n self.element_neighbors_edges = []\n\n # determine neighbors\n print('Looking for neighbors')\n time_start = time.time()\n for nr, element_nodes in enumerate(self.elements):\n # print('element {0}/{1}'.format(nr + 1, self.nr_of_elements))\n # print(element_nodes)\n neighbors = []\n neighbors_edges = [] # store the edges to this neighbor\n for nr1, el in enumerate(self.elements):\n # we look for elements that have two nodes in common with this\n # element\n intersection = np.intersect1d(element_nodes, el)\n if intersection.size == 2:\n neighbors.append(nr1)\n neighbors_edges.append(intersection)\n # stop if we reached the maximum number of possible edges\n # this saves us quite some loop iterations\n if len(neighbors) == max_nr_edges:\n break\n self.element_neighbors_data.append(neighbors)\n self.element_neighbors_edges.append(neighbors_edges)\n time_end = time.time()\n print('elapsed time: {} s'.format(time_end - time_start))\n return self.element_neighbors_data" ]
[ "0.6101965", "0.607459", "0.57528055", "0.57520497", "0.5742883", "0.5716349", "0.56814736", "0.5574797", "0.5566046", "0.5548959", "0.5539017", "0.5530729", "0.549348", "0.54754215", "0.547334", "0.5468345", "0.5450718", "0.5447196", "0.5421929", "0.54108864", "0.5410668", "0.54072815", "0.5394959", "0.537348", "0.5372063", "0.5368927", "0.5350439", "0.5324502", "0.5323025", "0.53008294" ]
0.63305134
0
This method defines the mesh points for a rectangle, i.e., the position of all mesh points, the position of internal mesh points, and the position of mesh points on the boundary. The origin of the rectangular coordinate system is set at the lower left vertex
def defineRectangleLayout(self): #--- Define a 2-D mesh --- # Divide x- and y-axis self.xPoints = self.frange(0,self.Lx,self.h) self.yPoints = self.frange(0,self.Ly,self.h) # Position (xy-coordinate) of boundary points boundary_xyCoord = [(0,j) for j in self.yPoints] + [(self.Lx,j) for j in self.yPoints] + [(i,0) for i in self.xPoints[1:len(self.xPoints)-1]] + [(i,self.Ly) for i in self.xPoints[1:len(self.xPoints)-1]] # Define the dictionary containing boundary points for k in boundary_xyCoord: x = k[0] y = k[1] xLabel = self.xPoints.index(x) yLabel = self.yPoints.index(y) self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) # Position (xy-coordinate) of internal mesh points internal_xyCoord = [(i,j) for i in self.xPoints[1:len(self.xPoints)-1] for j in self.yPoints[1:len(self.yPoints)-1]] # Define the dictionary containing internal points for k in internal_xyCoord: x = k[0] y = k[1] xLabel = self.xPoints.index(x) yLabel = self.yPoints.index(y) self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) # Now that we have assigned the labels we can define fE, fW, fN and fS self.fCalc()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rectangle_mesh(point1=Point(0,0), point2=Point(2,1), numptsX=10, numptsY=5):\n\n mesh = RectangleMesh(point1, point2, numptsX, numptsY )\n print_mesh_stats(mesh)\n \n return mesh", "def defineCircleLayout(self):\n # Define a 2-D array representing the position of each mesh point\n self.xPoints = self.frange(0,self.R,self.h)\n self.yPoints = self.frange(0,self.R,self.h)\n\n # Position of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints for j in self.yPoints if (i - self.R)**2 + (j - self.R)**2 < self.R^2] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = xPoints.index(x)\n yLabel = yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Position of the boundary points\n # Find the intersection of each mesh line with the circle\n # For a given vertical mesh line: \n # y = R - sqrt(R^2 - (x-R)^2) & y = R + sqrt(R^2 - (x-R)^2)\n # For a given horizontal mesh line: \n # x = R - sqrt(R^2 - (y-R)^2) & x = R + sqrt(R^2 - (y-R)^2)\n boundary_xyCoord = [(0,self.R),(self.R,0),(self.R,2*self.R),(2*self.R,self.R)] + [(x,self.R - math.sqrt(self.R**2 - (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(x,self.R - math.sqrt(self.R**2 + (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(self.R - math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] + [(self.R + math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n [xLabel,yLabel] = self.findLabel(x,y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def Rectangle(self,lower_left_point=(0,0), upper_right_point=(2,1),\n nx=5, ny=5, element_type=\"tri\"):\n\n if element_type != \"tri\" and element_type != \"quad\":\n raise ValueError(\"Element type should either be tri or quad\")\n\n if self.elements is not None and self.points is not None:\n self.__reset__()\n\n if (lower_left_point[0] > upper_right_point[0]) or \\\n (lower_left_point[1] > upper_right_point[1]):\n raise ValueError(\"Incorrect coordinate for lower left and upper right vertices\")\n\n nx, ny = int(nx), int(ny)\n if nx <= 0 or ny <= 0:\n raise ValueError(\"Number of discretisation cannot be zero or negative: nx={} ny={}\".format(nx,ny))\n\n\n\n from scipy.spatial import Delaunay\n\n x=np.linspace(lower_left_point[0],upper_right_point[0],nx+1)\n y=np.linspace(lower_left_point[1],upper_right_point[1],ny+1)\n\n X,Y = np.meshgrid(x,y)\n coordinates = np.dstack((X.ravel(),Y.ravel()))[0,:,:]\n\n if element_type == \"tri\":\n tri_func = Delaunay(coordinates)\n self.element_type = \"tri\"\n self.elements = tri_func.simplices\n self.nelem = self.elements.shape[0]\n self.points = tri_func.points\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdgesTri()\n\n elif element_type == \"quad\":\n\n self.nelem = int(nx*ny)\n elements = np.zeros((self.nelem,4),dtype=np.int64)\n\n dum_0 = np.arange((nx+1)*ny)\n dum_1 = np.array([(nx+1)*i+nx for i in range(ny)])\n col0 = np.delete(dum_0,dum_1)\n elements[:,0] = col0\n elements[:,1] = col0 + 1\n elements[:,2] = col0 + nx + 2\n elements[:,3] = col0 + nx + 1\n\n self.nnode = int((nx+1)*(ny+1))\n self.element_type = \"quad\"\n 
self.elements = elements\n self.points = coordinates\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdgesQuad()\n self.GetEdgesQuad()", "def boundary(self): # -> BaseGeometry:\n ...", "def update_rect(self):\n if self.vertex_list:\n self.vertex_list.vertices = (\n self._rect.bottom_left.tuple +\n self._rect.bottom_right.tuple +\n self._rect.top_right.tuple +\n self._rect.top_left.tuple\n )", "def Bounds(self):\n assert self.points is not None\n\n if self.points.shape[1] == 3:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1]),\n np.min(self.points[:,2])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1]),\n np.max(self.points[:,2])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 2:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 1:\n bounds = np.array([[np.min(self.points[:,0])],\n [np.max(self.points[:,0])]])\n makezero(bounds)\n return bounds\n else:\n raise ValueError(\"Invalid dimension for mesh coordinates\")", "def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc", "def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_index=[]\n throat_vert_index=[]\n #Find boundary extent\n [x_min,x_max,y_min,y_max,z_min,z_max]=vo.vertex_dimension(self,self.pores(),parm='minmax')\n min_point = np.around(np.array([x_min,y_min,z_min]),10)\n max_point = np.around(np.array([x_max,y_max,z_max]),10)\n Np = self.num_pores()\n Nt = self.num_throats()\n new_throat_count = 0\n # ridge_dict contains a dictionary where the key is a set of 2 neighbouring pores and the value is the vertex indices\n # that form the throat or ridge between them\n for p,v in self._vor.ridge_dict.items():\n # if the vertex with index -1 is contained in list then the ridge is unbounded - ignore these\n if np.all(np.asarray(v) >=0):\n #boundary throats will be those connecting one pore inside the original set and one out\n if (p[0] in range(Np) and p[1] not in range(Np)) or\\\n (p[0] not in range(Np) and p[1] in range(Np)):\n # the dictionary key is not in numerical order so find the pore index inside\n if p[0] in range(Np):\n my_pore=p[0]\n else:\n my_pore=p[1]\n my_pore_coord = self[\"pore.coords\"][my_pore]\n new_pore_coord = my_pore_coord.copy()\n #rounding necessary here to identify the plane as Voronoi can have 1e-17 and smaller errors\n throat_verts = np.around(self._vor.vertices[v],10)\n #find which plane we are aligned with (if any) and align new_pore with throat plane\n if len(np.unique(throat_verts[:,0])) == 1:\n new_pore_coord[0]=np.unique(throat_verts[:,0])\n elif len(np.unique(throat_verts[:,1])) == 1:\n new_pore_coord[1]=np.unique(throat_verts[:,1])\n elif len(np.unique(throat_verts[:,2])) == 1:\n new_pore_coord[2]=np.unique(throat_verts[:,2])\n else:\n new_pore_coord = throat_verts.mean()\n bound_coords.append(new_pore_coord)\n bound_conns.append(np.array([my_pore,new_throat_count+Np]))\n bound_vert_index.append(dict(zip(v,throat_verts)))\n throat_vert_index.append(dict(zip(v,throat_verts)))\n new_throat_count += 1\n\n #Add new pores and connections\n self.extend(pore_coords=bound_coords, throat_conns=bound_conns)\n #Record new number of pores\n Mp = self.num_pores()\n Mt = self.num_throats()\n new_pore_ids = np.arange(Np,Mp)\n new_throat_ids = 
np.arange(Nt,Mt)\n #Identify which boundary the pore sits on\n front = self.pores()[self['pore.coords'][:,0]==min_point[0]]\n back = self.pores()[self['pore.coords'][:,0]==max_point[0]]\n left = self.pores()[self['pore.coords'][:,1]==min_point[1]]\n right = self.pores()[self['pore.coords'][:,1]==max_point[1]]\n bottom = self.pores()[self['pore.coords'][:,2]==min_point[2]]\n top = self.pores()[self['pore.coords'][:,2]==max_point[2]]\n #Assign labels\n self['pore.boundary'] = False\n self['pore.boundary'][new_pore_ids] = True\n self['pore.right_boundary'] = False\n self['pore.left_boundary'] = False\n self['pore.front_boundary'] = False\n self['pore.back_boundary'] = False\n self['pore.top_boundary'] = False\n self['pore.bottom_boundary'] = False\n self['pore.right_boundary'][right] = True\n self['pore.left_boundary'][left] = True\n self['pore.front_boundary'][front] = True\n self['pore.back_boundary'][back] = True\n self['pore.top_boundary'][top] = True\n self['pore.bottom_boundary'][bottom] = True\n #Save the throat verts\n self[\"pore.vert_index\"][new_pore_ids] = bound_vert_index\n self[\"throat.vert_index\"][new_throat_ids] = throat_vert_index", "def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + 
self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y8 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n 
cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def real_boundaries(self):\n return (self._points[0][1], self._points[0][3])", "def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")", "def _boundRect(self):\n self.upperleft = list(map(min, zip(*self.addresstamp)))\n self.bottomright = list(map(max, zip(*self.addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - 
self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def test_bounding_rectangle(self, world):\n positions = [(0, 2), (2, 0), (3, 1), (2, 3)]\n for pos in positions:\n world.set_cell(pos)\n assert world.min_pos() == (0, 0) and world.max_pos() == (3, 3)", "def create_coords(self):\n phys_box = self.geom.bounds # the physical box where the domain lies\n\n # validation of the space step with the physical box size\n for k in range(self.dim):\n self.global_size.append((phys_box[k][1] - phys_box[k][0])/self.dx)\n if not self.global_size[-1].is_integer():\n self.log.error('The length of the box in the direction {0} must be a multiple of the space step'.format(k))\n sys.exit()\n\n region = self.mpi_topo.get_region(*self.global_size)\n region_size = [r[1] - r[0] for r in region]\n\n # spatial mesh\n halo_size = np.asarray(self.stencil.vmax)\n halo_beg = self.dx*(halo_size - 0.5)\n\n self.coords_halo = [np.linspace(phys_box[k][0] + self.dx*region[k][0] - halo_beg[k],\n phys_box[k][0] + self.dx*region[k][1] + halo_beg[k],\n region_size[k] + 2*halo_size[k]) for k in range(self.dim)]\n\n self.coords = [self.coords_halo[k][halo_size[k]:-halo_size[k]] for k in range(self.dim)]", "def _boundRect(self):\n addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in self.footprints])\n self.upperleft = list(map(min, zip(*addresstamp)))\n self.bottomright = list(map(max, zip(*addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def boundary(self):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.pos.x > WIDTH - 48:\n self.pos.x = WIDTH - 48\n if self.pos.y < 0:\n self.pos.y = 0\n if self.pos.y > HEIGHT - 48:\n self.pos.y = HEIGHT - 48\n\n self.rect.topleft = self.pos", "def _calculate_content(self, points):\n xs = [p.x for p in points]\n ys = [p.y for p in points]\n min_x = min(xs)\n max_x = max(xs)\n min_y = min(ys)\n max_y = max(ys)\n self._dx = max_x - min_x\n self._dy = max_y - min_y\n return pygame.Rect(min_x, min_y, self._dx, self._dy)", "def update_position(\n self, front_left_vertex, front_right_vertex, back_left_vertex, back_right_vertex\n ):\n\n self.front_left_vertex = front_left_vertex\n self.front_right_vertex = front_right_vertex\n self.back_left_vertex = back_left_vertex\n self.back_right_vertex = back_right_vertex\n\n # Initialize the line vortices that make up the ring vortex.\n self.front_leg = LineVortex(\n origin=self.front_right_vertex,\n termination=self.front_left_vertex,\n strength=self.strength,\n )\n self.left_leg = LineVortex(\n origin=self.front_left_vertex,\n termination=self.back_left_vertex,\n strength=self.strength,\n )\n self.back_leg = LineVortex(\n origin=self.back_left_vertex,\n termination=self.back_right_vertex,\n strength=self.strength,\n )\n self.right_leg = LineVortex(\n origin=self.back_right_vertex,\n termination=self.front_right_vertex,\n strength=self.strength,\n )\n\n # Initialize a variable to hold the centroid of the 
ring vortex.\n self.center = ps.geometry.centroid_of_quadrilateral(\n self.front_left_vertex,\n self.front_right_vertex,\n self.back_left_vertex,\n self.back_right_vertex,\n )", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def run(self): \n\n # Dictionaries whose keys are labels of the points in a 2-D grid and values\n # are an instance of the class meshPoint holding the informaiton about \n # that mesh point\n self.boundaryPoints = {}\n self.internalPoints = {}\n\n # Rectangle \n if self.layoutType.lower() == 'rectangle': \n # Define the mesh for a rectanglular layout\n self.defineRectangleLayout()\n # Circle \n elif self.layoutType.lower() == 'circle':\n # Define the mesh for a circular layout\n self.defineCircleLayout()\n\n return [self.internalPoints,self.boundaryPoints]", "def set_bounding_box(self, top_left_y: int, top_left_x: int, bottom_right_y: int, bottom_right_x: int) -> None:\n self.top_left_y = top_left_y\n self.top_left_x = top_left_x\n self.bottom_right_y = bottom_right_y\n self.bottom_right_x = bottom_right_x", "def set_boundary(self, y, start_x, end_x):\n pass", "def set_points(self, pts):\n\n self.minX = sys.maxint\n self.minY = sys.maxint\n self.maxX = sys.maxint * -1\n self.maxY = sys.maxint * -1\n\n self.points = []\n #self.mbr = Rect()\n for p in pts:\n x,y = p\n\n if x < self.minX:\n self.minX = x\n if x > self.maxX:\n self.maxX = x\n if y < self.minY:\n self.minY = y\n if y > self.maxY:\n self.maxY = y\n\n self.points.append(Point(x,y))\n\n self.mbr = Rect(Point(self.minX,self.minY),Point(self.maxX,self.maxY))", "def boundaries_and_initialize():\n greenLower = (29, 86, 6) # define the lower and upper boundaries of the \"green\"\n greenUpper = (64, 255, 255)\n pts = [((200,300),(255,255,255), 0)]\n blanks = []\n linecolor = (0,0,0)\n counter = 1\n radius = 11\n return greenLower, greenUpper, pts, linecolor, counter, blanks, radius", "def Rectangle(points=None):\n if points is None:\n points = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]\n if len(points) == 4:\n warnings.warn(\n 'Rectangle defined by 4 points is deprecated. 
Please use ``pyvista.Quadrilateral``.',\n PyVistaDeprecationWarning,\n )\n return Quadrilateral(points)\n if len(points) != 3:\n raise TypeError('Points must be given as length 3 np.ndarray or list')\n\n points, _ = _coerce_pointslike_arg(points)\n\n point_0 = points[0]\n point_1 = points[1]\n point_2 = points[2]\n\n vec_01 = point_1 - point_0\n vec_02 = point_2 - point_0\n vec_12 = point_2 - point_1\n\n scalar_pdct_01_02 = np.dot(vec_01, vec_02)\n scalar_pdct_01_12 = np.dot(vec_01, vec_12)\n scalar_pdct_02_12 = np.dot(vec_02, vec_12)\n\n null_scalar_products = [\n val\n for val in [scalar_pdct_01_02, scalar_pdct_01_12, scalar_pdct_02_12]\n if np.isclose(val, 0)\n ]\n if len(null_scalar_products) == 0:\n raise ValueError(\"The three points should defined orthogonal vectors\")\n if len(null_scalar_products) > 1:\n raise ValueError(\"Unable to build a rectangle with less than three different points\")\n\n points = np.array([point_0, point_1, point_2, point_0])\n if np.isclose(scalar_pdct_01_02, 0):\n points[3] = point_0 + vec_01 + vec_02\n cells = np.array([[4, 0, 1, 3, 2]])\n elif np.isclose(scalar_pdct_01_12, 0):\n points[3] = point_1 + vec_12 - vec_01\n cells = np.array([[4, 0, 1, 2, 3]])\n else:\n points[3] = point_2 - vec_02 - vec_12\n cells = np.array([[4, 0, 2, 1, 3]])\n\n return pyvista.PolyData(points, cells)", "def CellBoundary(self, p_int, , vtkIdList):\n ...", "def draw_grid(self, min_x, max_x, min_y, max_y, min_z, max_z) -> None:\n from pymol import cmd\n from math import sin, cos\n \n # Prepare dimensions\n angle1 = 0.0\n angle2 = 0.0\n min_x = x - min_x\n max_x = max_x - x \n min_y = y - min_y \n max_y = max_y - y \n min_z = z - min_z \n max_z = max_z - z \n\n # Get positions of grid vertices\n # P1\n x1 = -min_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y1 = -min_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z1 = min_x * sin(angle2) + min_y * sin(angle1) * cos(angle2) - min_z * cos(angle1) * cos(angle2) + z\n \n # P2\n x2 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y2 = (-min_y) * cos(angle1) + (-min_z) * sin(angle1) + y\n \n z2 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P3\n x3 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y3 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z3 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P4\n x4 = (-min_x) * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y4 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z4 = -(-min_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n \n # P5\n x5 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y5 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z5 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P6\n x6 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y6 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z6 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n \n # P7\n x7 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * 
cos(angle1) * sin(angle2) + x\n\n y7 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z7 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n # P8\n x8 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y8 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z8 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z \n\n # Create box object\n if \"grid\" in cmd.get_names(\"objects\"):\n cmd.delete(\"grid\")\n\n # Create vertices\n cmd.pseudoatom(\"grid\", name=\"v2\", pos=[x2, y2, z2], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v3\", pos=[x3, y3, z3], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v4\", pos=[x4, y4, z4], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v5\", pos=[x5, y5, z5], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v6\", pos=[x6, y6, z6], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v7\", pos=[x7, y7, z7], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v8\", pos=[x8, y8, z8], color=\"white\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1x\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v2x\", pos=[x2, y2, z2], color='white')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1y\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v3y\", pos=[x3, y3, z3], color='white')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v4z\", pos=[x4, y4, z4], color='white')\n cmd.pseudoatom(\"grid\", name=\"v1z\", pos=[x1, y1, z1], color='white')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def update_rect(self):\n self._update_vertex_list()", "def rectangular(m, n, len1=1.0, len2=1.0, origin = (0.0, 0.0)):\n\n from anuga.config import epsilon\n\n delta1 = float(len1)/m\n delta2 = float(len2)/n\n\n #Calculate number of points\n Np = (m+1)*(n+1)\n\n class Index(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return j+i*(self.n+1)\n\n\n index = Index(n,m)\n\n points = num.zeros((Np, 2), float)\n\n for i in range(m+1):\n for j in range(n+1):\n\n points[index(i,j),:] = [i*delta1 + origin[0], j*delta2 + origin[1]]\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n #Calculate number of triangles\n Nt = 2*m*n\n\n\n elements = num.zeros((Nt, 3), int)\n boundary = {}\n nt = -1\n for i in range(m):\n for j in range(n):\n nt = nt + 1\n i1 = index(i,j+1)\n i2 = index(i,j)\n i3 = index(i+1,j+1)\n i4 = index(i+1,j)\n\n\n #Update boundary 
dictionary and create elements\n if i == m-1:\n boundary[nt, 2] = 'right'\n if j == 0:\n boundary[nt, 1] = 'bottom'\n elements[nt,:] = [i4,i3,i2] #Lower element\n nt = nt + 1\n\n if i == 0:\n boundary[nt, 2] = 'left'\n if j == n-1:\n boundary[nt, 1] = 'top'\n elements[nt,:] = [i1,i2,i3] #Upper element\n\n return points, elements, boundary", "def create_from_bounds(self, lbs, ubs):\n self.base_vertices = (np.array([lbs])+np.array([ubs])).T/2\n self.base_vectors = np.diag((np.array(ubs)-np.array(lbs))/2)" ]
[ "0.64728147", "0.62877953", "0.6205329", "0.60404485", "0.6037853", "0.5973269", "0.5953323", "0.5934904", "0.589541", "0.58810884", "0.58740115", "0.58539", "0.57664204", "0.5756188", "0.57504964", "0.57337874", "0.5713471", "0.5667736", "0.5666688", "0.5631335", "0.5629928", "0.5617965", "0.55948603", "0.5578393", "0.55475825", "0.5543929", "0.55400574", "0.5536814", "0.5505201", "0.5489213" ]
0.7203463
0
This method defines the mesh points for a circle, i.e., the positions of all mesh points, the internal mesh points, and the mesh points on the boundary. The circle is assumed to be inscribed in a square whose side is 2R, where R is the radius of the circle. The origin of the coordinate system is placed at the lower-left vertex of this square. Note that if the origin were at the center of the circle its equation would be x^2 + y^2 = R^2. With the origin moved to the lower-left vertex the equation becomes (x - R)^2 + (y - R)^2 = R^2.
def defineCircleLayout(self):
    # Define the 1-D arrays of mesh-line positions along x and y
    # (the bounding square has side 2R, so the lines run from 0 to 2R)
    self.xPoints = self.frange(0, 2*self.R, self.h)
    self.yPoints = self.frange(0, 2*self.R, self.h)

    # Position of internal mesh points
    self.internal_xyCoord = [(i, j) for i in self.xPoints for j in self.yPoints
                             if (i - self.R)**2 + (j - self.R)**2 < self.R**2]

    # Define the dictionary containing internal points
    for k in self.internal_xyCoord:
        x = k[0]
        y = k[1]
        xLabel = self.xPoints.index(x)
        yLabel = self.yPoints.index(y)
        self.internalPoints[(xLabel, yLabel)] = meshPoint(type='internal', x=x, y=y,
                                                          xLabel=xLabel, yLabel=yLabel)

    # Position of the boundary points
    # Find the intersection of each mesh line with the circle
    # For a given vertical mesh line:
    #   y = R - sqrt(R^2 - (x-R)^2)  &  y = R + sqrt(R^2 - (x-R)^2)
    # For a given horizontal mesh line:
    #   x = R - sqrt(R^2 - (y-R)^2)  &  x = R + sqrt(R^2 - (y-R)^2)
    self.boundary_xyCoord = [(0, self.R), (self.R, 0), (self.R, 2*self.R), (2*self.R, self.R)] + \
        [(x, self.R - math.sqrt(self.R**2 - (x - self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + \
        [(x, self.R + math.sqrt(self.R**2 - (x - self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + \
        [(self.R - math.sqrt(self.R**2 - (y - self.R)**2), y) for y in self.yPoints[1:len(self.yPoints)-1]] + \
        [(self.R + math.sqrt(self.R**2 - (y - self.R)**2), y) for y in self.yPoints[1:len(self.yPoints)-1]]

    # Define the dictionary containing boundary points
    for k in self.boundary_xyCoord:
        x = k[0]
        y = k[1]
        [xLabel, yLabel] = self.findLabel(x, y)
        self.boundaryPoints[(xLabel, yLabel)] = meshPoint(type='boundary', x=x, y=y,
                                                          xLabel=xLabel, yLabel=yLabel)

    # Now that we have assigned the labels we can define fE, fW, fN and fS
    self.fCalc()
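As a hedged aside, the boundary construction above boils down to intersecting each interior grid line with the circle (x - R)^2 + (y - R)^2 = R^2. A minimal self-contained sketch of that idea, independent of the class (the helper name circle_boundary_points and its arguments are illustrative only, not part of the original code):

    import math

    def circle_boundary_points(R, h):
        # Intersections of interior grid lines with the circle
        # (x - R)^2 + (y - R)^2 = R^2 inscribed in the square [0, 2R] x [0, 2R].
        n = int(round(2 * R / h))
        grid = [k * h for k in range(1, n)]        # interior mesh lines only
        pts = []
        for x in grid:                             # vertical lines x = const
            d = R**2 - (x - R)**2
            if d > 0:
                pts += [(x, R - math.sqrt(d)), (x, R + math.sqrt(d))]
        for y in grid:                             # horizontal lines y = const
            d = R**2 - (y - R)**2
            if d > 0:
                pts += [(R - math.sqrt(d), y), (R + math.sqrt(d), y)]
        return pts

    # e.g. R = 1.0, h = 0.25 gives 28 boundary points
    print(len(circle_boundary_points(1.0, 0.25)))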
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Circle(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,\n element_type=\"tri\", refinement=False, refinement_level=2, algorithm=\"standard\"):\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the circle should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if algorithm == \"midpoint_subdivision\":\n from Florence.MeshGeneration.CustomMesher import SubdivisionCircle\n mesh = SubdivisionCircle(center=center, radius=radius, nrad=nrad, ncirc=ncirc,\n element_type=element_type, refinement=refinement, refinement_level=refinement_level)\n self.__update__(mesh)\n return\n\n if refinement:\n ndivider = refinement_level\n if nrad==1: nrad=2\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider)\n\n\n if ncirc % 8 != 0 or ncirc < 8:\n ncirc = (ncirc // 8)*8 + 8\n\n radii = radius\n\n radius = np.linspace(0,radii,nrad+1)[1:]\n t = np.linspace(0,2*np.pi,ncirc+1)\n x = radius[0]*np.sin(t)[::-1][:-1]\n y = radius[0]*np.cos(t)[::-1][:-1]\n\n points = np.zeros((ncirc+1,2),dtype=np.float64)\n points[0,:] = [0.,0.]\n points[1:,:] = np.array([x,y]).T\n\n\n self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)\n aranger = np.arange(ncirc // 2)\n self.elements[:,1] = 2*aranger + 1\n self.elements[:,2] = 2*aranger + 2\n self.elements[:,3] = 2*aranger + 3\n self.elements[-1,-1] = 1\n\n for i in range(1,nrad):\n t = np.linspace(0,2*np.pi,ncirc+1);\n x = radius[i]*np.sin(t)[::-1][:-1];\n y = radius[i]*np.cos(t)[::-1][:-1];\n points = np.vstack((points,np.array([x,y]).T))\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(1,nrad):\n aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)\n elements[:,0] = aranger\n elements[:,1] = aranger + ncirc\n elements[:,2] = np.append((aranger + 1 + ncirc)[:-1],i*ncirc+1)\n elements[:,3] = np.append((aranger + 1)[:-1],1+(i-1)*ncirc)\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n\n makezero(points)\n self.points = points\n self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n # SECOND LEVEL OF REFINEMENT IF NEEDED\n # mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=2)\n # for i in range(1,self.nelem):\n # mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=2)\n # self.__update__(mesh)\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def circle_point(radius, phi):\n if radius <= 0:\n raise AssertionError('Radius mast be grater than 0')\n x = radius * cos(radians(phi))\n y = radius * 
sin(radians(phi))\n z = 0\n\n return x, y, z", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def circle_from_points(a, b, c):\n ab = subtract_vectors(b, a)\n cb = subtract_vectors(b, c)\n ba = subtract_vectors(a, b)\n ca = subtract_vectors(a, c)\n ac = subtract_vectors(c, a)\n bc = subtract_vectors(c, b)\n normal = normalize_vector(cross_vectors(ab, ac))\n d = 2 * length_vector_sqrd(cross_vectors(ba, cb))\n A = length_vector_sqrd(cb) * dot_vectors(ba, ca) / d\n B = length_vector_sqrd(ca) * dot_vectors(ab, cb) / d\n C = length_vector_sqrd(ba) * dot_vectors(ac, bc) / d\n Aa = scale_vector(a, A)\n Bb = scale_vector(b, B)\n Cc = scale_vector(c, C)\n center = add_vectorlist([Aa, Bb, Cc])\n radius = distance_point_point(center, a)\n return center, radius, normal", "def circle_center(top_aerofoil_points, bottom_aerofoil_points):\n q = np.array(top_aerofoil_points[0].coordinates) - np.array(top_aerofoil_points[1].coordinates)\n r = np.array(bottom_aerofoil_points[-1].coordinates) - np.array(bottom_aerofoil_points[-2].coordinates)\n c = np.cross(q, [0, 0, -1]) / np.linalg.norm(q)\n d = np.cross(r, [0, 0, 1]) / np.linalg.norm(r)\n radius = (q[1] - r[1]) / (d[1] - c[1])\n s = q + radius * c\n return Point(tuple(-s))", "def getCircleCircumscribed(self):\n p1, p2, p3 = self.points\n a1 = - (p2.x - p1.x) / (p2.y - p1.y)\n b1 = (p2.x ** 2 - p1.x ** 2 + p2.y ** 2 - p1.y ** 2) / (2 * (p2.y - p1.y))\n a2 = - (p3.x - p2.x) / (p3.y - p2.y)\n b2 = (p3.x ** 2 - p2.x ** 2 + p3.y ** 2 - p2.y ** 2) / (2 * (p3.y - p2.y))\n x = (b1 - b2) / (a2 - a1)\n y = a1 * x + b1\n radius = math.hypot(p1.x - x, p1.y - y)\n return Circle(x, y, radius=radius)", "def circle(cls, radius, position, open_circle=False):\n\n nb_points = 2*np.pi*radius/1\n points1 = radius*np.transpose(np.concatenate(([np.cos(2*np.pi*np.arange(0,nb_points+1)/nb_points)],[np.sin(2*np.pi*np.arange(0,nb_points+1)/nb_points)]),axis=0))\n \n for y in range(points1.shape[0]):\n points1[y,:]=points1[y,:]+position\n \n circle_obj = cls()\n circle_obj.coord = [points1]\n circle_obj.open = open_circle\n return circle_obj", "def get_circle_coords(center, r):\n circle = [[r, 180* phi/3.14159265] for phi in range(0, 180, 5)]\n circle = [pol2cart(p[0], p[1]) + (center[0], center[1]) for p in circle]\n return circle", "def circle(center, perp_vect, radius, element_number=10):\n # tl = [0, 0.2, 0.4, 0.6, 0.8]\n tl = np.linspace(0, 1, element_number)\n\n # vector form center to edge of circle\n # u is a unit vector from the centre of the circle to any point on the\n # circumference\n\n # normalized perpendicular vector\n n = perp_vect / np.linalg.norm(perp_vect)\n\n # normalized vector from the centre to point on the circumference\n u = perpendicular_vector(n)\n u /= np.linalg.norm(u)\n\n pts = []\n\n for t in tl:\n # u = np.array([0, 1, 0])\n # n = np.array([1, 0, 0])\n pt = (\n radius * np.cos(t * 2 * np.pi) * u\n + radius * np.sin(t * 2 * np.pi) * np.cross(u, n)\n + center\n )\n\n pt = pt.tolist()\n pts.append(pt)\n\n return pts", "def CirclePoints(center,radius,num_points=10):\n t=np.linspace(0., 2.*np.pi, num_points, endpoint = False)\n # define points\n points=[(center[0]+radius*np.cos(angle),center[1]+\n radius*np.sin(angle)) for angle in t]\n return points", "def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = 
int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))", "def Icosphere(radius=1.0, center=(0.0, 0.0, 0.0), nsub=3):\n mesh = Icosahedron()\n mesh.clear_data()\n mesh = mesh.subdivide(nsub=nsub)\n\n # scale to desired radius and translate origin\n dist = np.linalg.norm(mesh.points, axis=1, keepdims=True) # distance from origin\n mesh.points = mesh.points * (radius / dist) + center\n return mesh", "def circle(self, center_x, center_y, radius, color):\n x = radius - 1\n y = 0\n d_x = 1\n d_y = 1\n err = d_x - (radius << 1)\n while x >= y:\n self.pixel(center_x + x, center_y + y, color)\n self.pixel(center_x + y, center_y + x, color)\n self.pixel(center_x - y, center_y + x, color)\n self.pixel(center_x - x, center_y + y, color)\n self.pixel(center_x - x, center_y - y, color)\n self.pixel(center_x - y, center_y - x, color)\n self.pixel(center_x + y, center_y - x, color)\n self.pixel(center_x + x, center_y - y, color)\n if err <= 0:\n y += 1\n err += d_y\n d_y += 2\n if err > 0:\n x -= 1\n d_x += 2\n err += d_x - (radius << 1)", "def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def circle(self, x, y, r, solid = False):\n px = 0\n py = r\n d = 1 - 2 * r\n err = 0\n while py >= 0:\n if solid:\n for i in range(x - px, x + px + 1):\n self.pixel(i, y + py, 1)\n self.pixel(i, y - py, 1)\n else:\n self.pixel(x + px, y + py, 1)\n self.pixel(x + px, y - py, 1)\n self.pixel(x - px, y + py, 1)\n self.pixel(x - px, y - py, 1)\n err = 2 * (d + py) - 1\n if d < 0 and err <= 0:\n px += 1\n d += 2 *px + 1\n else:\n err = 2 * (d - px) - 1\n if d > 0 and err > 0:\n py -= 1\n d += 1 - 2 * py\n else:\n px += 1\n d += 2 * (px - py)\n py -= 1", "def Circle(radius=0.5, resolution=100):\n points = np.zeros((resolution, 3))\n theta = np.linspace(0.0, 2.0 * np.pi, resolution, endpoint=False)\n points[:, 0] = radius * np.cos(theta)\n points[:, 1] = radius * np.sin(theta)\n cells = np.array([np.append(np.array([resolution]), np.arange(resolution))])\n return wrap(pyvista.PolyData(points, cells))", "def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def model_circle(r, nx, ny, nz=1):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.process_inplace(\"testimage.circlesphere\", {\"radius\":r, \"fill\":1})\n\treturn e", "def circle(self):\n return circle(self.N, self.o, self.r)", "def Sphere(self,radius=1.0, npoints=10):\n\n # RESET MESH\n self.__reset__()\n\n from math import pi, cos, sin\n from meshpy.tet import MeshInfo, build\n from meshpy.geometry import generate_surface_of_revolution, EXT_OPEN, GeometryBuilder\n\n r = radius\n\n points = npoints\n dphi = pi/points\n\n def truncate(r):\n if abs(r) < 1e-10:\n return 0\n else:\n return r\n\n rz = [(truncate(r*sin(i*dphi)), r*cos(i*dphi)) for 
i in range(points+1)]\n\n geob = GeometryBuilder()\n geob.add_geometry(*generate_surface_of_revolution(rz,\n closure=EXT_OPEN, radial_subdiv=10))\n\n mesh_info = MeshInfo()\n geob.set(mesh_info)\n\n mesh = build(mesh_info)\n\n self.points = np.asarray(mesh.points)\n self.elements = np.asarray(mesh.elements)\n # self.faces = np.asarray(mesh.faces)\n # self.edges = np.asarray(self.edges)\n self.nelem = self.elements.shape[0]\n self.element_type = \"tet\"\n\n\n # GET EDGES & FACES - NONE ASSIGNMENT IS NECESSARY OTHERWISE IF FACES/EDGES ALREADY EXIST\n # THEY WON'T GET UPDATED\n self.faces = None\n self.edges = None\n self.GetBoundaryFacesTet()\n self.GetBoundaryEdgesTet()\n\n # CHECK MESH\n points = self.points[np.unique(self.faces),:]\n if not np.isclose(np.linalg.norm(points,axis=1),radius).all():\n raise ValueError(\"MeshPy could not construct a valid linear mesh for sphere\")", "def pos_on_semicircle(x, r, cxy):\n pos = np.sqrt(r ** 2 - (x - cxy[0]) ** 2) + cxy[1]\n\n return pos", "def _circle_intersection(self, circle, point):\n dist = euclidean_distance((circle[0], circle[1]), point) - circle[2]\n vun = vec2d((circle[0] - point[0]), (circle[1] - point[1]))\n v = vun.normalized()\n\n x, y = (point[0] + dist * v.x), (point[0] + dist * v.x)\n\n return dist, (x, y)", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]", "def draw_circle(self, x0, y0, r, color=None):\n f = 1 - r\n ddF_x = 1\n ddF_y = -2 * r\n x = 0\n y = r\n\n self.set(x0, y0 + r, color)\n self.set(x0, y0 - r, color)\n self.set(x0 + r, y0, color)\n self.set(x0 - r, y0, color)\n\n while x < y:\n if f >= 0:\n y -= 1\n ddF_y += 2\n f += ddF_y\n x += 1\n ddF_x += 2\n f += ddF_x\n\n self.set(x0 + x, y0 + y, color)\n self.set(x0 - x, y0 + y, color)\n self.set(x0 + x, y0 - y, color)\n self.set(x0 - x, y0 - y, color)\n self.set(x0 + y, y0 + x, color)\n self.set(x0 - y, y0 + x, color)\n self.set(x0 + y, y0 - x, color)\n self.set(x0 - y, y0 - x, color)", "def circle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)", "def CircularPlate(self, side_length=30, radius=10, 
center=(0.,0.), ncirc=5, nrad=5, element_type=\"tri\"):\n\n self.CircularArcPlate(side_length=side_length, radius=radius, center=(0,0),\n start_angle=0., end_angle=np.pi/4., ncirc=ncirc,\n nrad=nrad, element_type=element_type)\n\n # First mirror the points along 45 degree axis\n self.elements = np.fliplr(self.elements)\n mmesh = deepcopy(self)\n mmesh.points[:,0] = self.points[:,1][::-1]\n mmesh.points[:,1] = self.points[:,0][::-1]\n mmesh.elements = np.fliplr(mmesh.elements)\n self += mmesh\n self.CheckNodeNumbering(change_order_to=\"anti-clockwise\", verbose=False)\n\n # Mirror along Y axis\n nmesh = deepcopy(self)\n nmesh.points[:,0] *= -1.\n nmesh.CheckNodeNumbering(change_order_to=\"anti-clockwise\", verbose=False)\n self += nmesh\n\n # Mirror along X axis\n nmesh = deepcopy(self)\n nmesh.points[:,1] *= -1.\n nmesh.CheckNodeNumbering(change_order_to=\"anti-clockwise\", verbose=False)\n self += nmesh\n\n # This needs to be done here\n self.points[:,0] += center[0]\n self.points[:,1] += center[1]\n\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()", "def pointOnCircle(cx, cy, radius, angle):\n angle = math.radians(angle) - (math.pi / 2)\n x = cx + radius * math.cos(angle)\n if x < cx:\n x = math.ceil(x)\n else:\n x = math.floor(x)\n\n y = cy + radius * math.sin(angle)\n\n if y < cy:\n y = math.ceil(y)\n else:\n y = math.floor(y)\n\n return (int(x), int(y))" ]
[ "0.6620402", "0.66109216", "0.65732807", "0.6343558", "0.632906", "0.6316999", "0.6316061", "0.62945837", "0.6293581", "0.6277532", "0.6220626", "0.6174127", "0.61614174", "0.6084093", "0.6067162", "0.6029042", "0.5996201", "0.59894913", "0.59739685", "0.59610116", "0.59384876", "0.59339094", "0.59189546", "0.59053975", "0.59005886", "0.589076", "0.5890658", "0.58891755", "0.58833754", "0.58506995" ]
0.7477296
0
Finds the label of a point in the mesh. Here, a label simply means the numbering of the points. x and y are the x- and y-coordinates of the mesh point.
def findLabel(self, x, y):
    #- xLabel -
    if x in self.xPoints:
        xLabel = self.xPoints.index(x)
    else:
        # Find the points on the left and right of this point in xPoints
        done = 0
        k = 0
        while done == 0:
            if self.xPoints[k] < x < self.xPoints[k + 1]:
                done = 1
                leftP = (self.xPoints[k], y)
                rightP = (self.xPoints[k + 1], y)
            else:
                k += 1
        # Check whether the left or right point is an internal mesh point
        if leftP in self.internal_xyCoord and rightP in self.internal_xyCoord:
            # It is impossible to have both the left and right points inside the
            # grid unless we deal with a non-convex shape (this should be
            # addressed in future developments)
            raise customError('**Error! Both the left and right points of the given point are inside the mesh')
        elif leftP in self.internal_xyCoord:
            # If the left point is inside the grid the label of the current
            # point should be one plus the label of the point on its left
            xLabel = self.xPoints.index(leftP[0]) + 1
        elif rightP in self.internal_xyCoord:
            # If the right point is inside the grid the label of the current
            # point should be one minus the label of the point on its right
            xLabel = self.xPoints.index(rightP[0]) - 1
        else:
            raise customError('**Error! Neither the left nor the right point of the given point is inside the mesh')

    #- yLabel -
    if y in self.yPoints:
        yLabel = self.yPoints.index(y)
    else:
        # Find the points below and above this point in yPoints
        done = 0
        k = 0
        while done == 0:
            if self.yPoints[k] < y < self.yPoints[k + 1]:
                done = 1
                lowerP = (x, self.yPoints[k])
                upperP = (x, self.yPoints[k + 1])
            else:
                k += 1
        # Check whether the lower or upper point is an internal mesh point
        if lowerP in self.internal_xyCoord and upperP in self.internal_xyCoord:
            # It is impossible to have both the lower and upper points
            # inside the grid unless we deal with a non-convex shape
            raise customError('**Error! Both the lower and upper points of the given point are inside the mesh')
        elif lowerP in self.internal_xyCoord:
            # If the lower point is inside the grid the label of the current
            # point should be one plus the label of the point below it
            yLabel = self.yPoints.index(lowerP[1]) + 1
        elif upperP in self.internal_xyCoord:
            # If the upper point is inside the grid the label of the current
            # point should be one minus the label of the point above it
            yLabel = self.yPoints.index(upperP[1]) - 1
        else:
            raise customError('**Error! Neither the lower nor the upper point of the given point is inside the mesh')

    return [xLabel, yLabel]
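The left/right (and lower/upper) bracketing performed above is essentially a bisection over a sorted list of mesh-line positions. A minimal stand-alone sketch using the standard bisect module (not part of the class above; shown only to illustrate the lookup):

    import bisect

    def bracket(sorted_vals, v):
        # Return indices (k, k+1) such that sorted_vals[k] < v < sorted_vals[k+1],
        # or (i, i) if v coincides with a mesh line.  Assumes min < v < max.
        i = bisect.bisect_left(sorted_vals, v)
        if i < len(sorted_vals) and sorted_vals[i] == v:
            return i, i
        return i - 1, i

    xPoints = [0.0, 0.5, 1.0, 1.5, 2.0]
    print(bracket(xPoints, 0.75))   # (1, 2): lies between x-labels 1 and 2
    print(bracket(xPoints, 1.0))    # (2, 2): exactly on a mesh line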
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetPointForLabel(points):\n # TODO: find the last point at a minute boundary\n return points[-1]", "def _pos2label(self, p, labels):\n if labels is not None:\n if p in labels.keys():\n return labels[p]\n else:\n return ''\n # raise ValueError('Fatal ERROR: no label for this position in label dictionary!')\n else:\n if p == 1:\n return 'top'\n elif p == 2:\n return 'bottom'\n elif p == 3:\n return 'left'\n elif p == 4:\n return 'right'", "def add_point_label(self, point, label, color):\n self.vertices_label[point].append((label, color))", "def labels(self):\n return self.label(self.p_y_given_x)", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def fromLabel(name):\n return Data.labels.index(name)", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def _draw_label(label, label_x, label_y):\n pass", "def getLabelInfo(self, label): # real signature unknown; restored from __doc__\n pass", "def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title", "def DrawPointLabel(self, dc, mDataDict):\n # ----------\n dc.SetPen(wx.Pen(wx.BLACK))\n dc.SetBrush(wx.Brush( wx.BLACK, wx.SOLID ) )\n \n sx, sy = mDataDict[\"scaledXY\"] #scaled x,y of closest point\n dc.DrawRectangle( sx-5,sy-5, 10, 10) #10by10 square centered on point\n px,py = mDataDict[\"pointXY\"]\n cNum = mDataDict[\"curveNum\"]\n pntIn = mDataDict[\"pIndex\"]\n legend = mDataDict[\"legend\"]\n #make a string to display\n s = \"Crv# %i, '%s', Pt. (%.2f,%.2f), PtInd %i\" %(cNum, legend, px, py, pntIn)\n dc.DrawText(s, sx , sy+1)\n # -----------", "def label(self, location, *args, **kwargs):\n\n if isinstance(location, fslimage.Image):\n return self.maskLabel(location, *args, **kwargs)\n else:\n return self.coordLabel(location, *args, **kwargs)", "def draw_label(self, image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=0.8, thickness=1):\n # gets the size of the label\n size = cv2.getTextSize(label, font, font_scale, thickness)[0]\n # where the position is\n x, y = point\n # gets the rectangle size\n cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)\n cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)", "def find_label(self, *args):\n return _ida_hexrays.cfuncptr_t_find_label(self, *args)", "def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()", "def draw_shape_label(self, label, xform, colour):\n #TODO deal with alignment, rotation\n pos = xform.chain(Point(label.x, label.y))\n self.canvas.text((pos.x, pos.y), label.text, fill=colour)", "def label(self, p_y_given_x):\n return np.argmax(p_y_given_x, axis=2).T", "def getLabel2(*args):", "def getLabel2(*args):", "def label(self):\n return self._label_shape", "def put_label(i):\n i = min(i, len(x) - 2)\n dx = sx[i + 1] - sx[i]\n dy = sy[i + 1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i + 1]) / 2. 
+ offset[0],\n (y[i] + y[i + 1]) / 2 + offset[1]]\n plt.text(pos[0],\n pos[1],\n label_text,\n size=9,\n rotation=rotation,\n color=line.get_color(),\n ha=\"center\",\n va=\"center\",\n bbox=dict(ec='1', fc='1', alpha=0.8))", "def label(tree):\n return tree[0]", "def get_graph_label(\n self,\n graph: ParametricFunction,\n label: float | str | Mobject = \"f(x)\",\n x_val: float | None = None,\n direction: Sequence[float] = RIGHT,\n buff: float = MED_SMALL_BUFF,\n color: ParsableManimColor | None = None,\n dot: bool = False,\n dot_config: dict | None = None,\n ) -> Mobject:\n\n if dot_config is None:\n dot_config = {}\n if color is None:\n color = graph.get_color()\n label = self.x_axis._create_label_tex(label).set_color(color)\n\n if x_val is None:\n # Search from right to left\n for x in np.linspace(self.x_range[1], self.x_range[0], 100):\n point = self.input_to_graph_point(x, graph)\n if point[1] < config[\"frame_y_radius\"]:\n break\n else:\n point = self.input_to_graph_point(x_val, graph)\n\n label.next_to(point, direction, buff=buff)\n label.shift_onto_screen()\n\n if dot:\n dot = Dot(point=point, **dot_config)\n label.add(dot)\n label.dot = dot\n return label", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return 'Column %d' % (self.index + 1)", "def _get_axis_label(\n self,\n label: float | str | Mobject,\n axis: Mobject,\n edge: Sequence[float],\n direction: Sequence[float],\n buff: float = SMALL_BUFF,\n ) -> Mobject:\n\n label = self.x_axis._create_label_tex(label)\n label.next_to(axis.get_edge_center(edge), direction=direction, buff=buff)\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return label", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def get_coords_by_label_2D(image, label):\n coords = np.argwhere(image == label)\n y = [y for y, x in coords]\n x = [x for y, x in coords]\n return y, x", "def put_label(i):\n i = min(i, len(x)-2)\n dx = sx[i+1] - sx[i]\n dy = sy[i+1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i+1])/2. + offset[0], (y[i] + y[i+1])/2 + offset[1]]\n plt.text(pos[0], pos[1], label_text, size=9, rotation=rotation, color = line.get_color(),\n ha=\"center\", va=\"center\", bbox = dict(ec='1',fc='1',alpha=0.8))" ]
[ "0.6582488", "0.6358846", "0.6352905", "0.6296264", "0.6275451", "0.6212019", "0.62040955", "0.62040955", "0.62040955", "0.6174925", "0.61322933", "0.6091263", "0.6082127", "0.6060067", "0.6033801", "0.6001611", "0.5974338", "0.5960098", "0.59467596", "0.59385484", "0.59385484", "0.59050095", "0.5899838", "0.58886814", "0.5882662", "0.58754456", "0.5869119", "0.5854531", "0.58503807", "0.58454657" ]
0.7756455
0
Computes fE, fW, fN and fS for each mesh point. These are used to calculate the coefficients needed to compute the Laplacian.
def fCalc(self):
    # A dictionary composed of all internal and boundary points
    allPoints = dict(self.internalPoints)
    allPoints.update(self.boundaryPoints)

    for pointLabel in allPoints.keys():
        # Compute fE, fW, fN and fS only for internal mesh points
        if allPoints[pointLabel].type.lower() == 'internal':
            xLabel = pointLabel[0]
            yLabel = pointLabel[1]
            x = self.internalPoints[(xLabel, yLabel)].x
            y = self.internalPoints[(xLabel, yLabel)].y
            xE = allPoints[(xLabel + 1, yLabel)].x
            xW = allPoints[(xLabel - 1, yLabel)].x
            yN = allPoints[(xLabel, yLabel + 1)].y
            yS = allPoints[(xLabel, yLabel - 1)].y

            if (xE - x)/self.h < -0.000001 or (xE - x)/self.h > 1.000001:
                errorMessage = '**Error! (xE - x)/h for the point with label (' + str(xLabel) + ',' + str(yLabel) + \
                    ') and coordinate (' + str(x) + ',' + str(y) + \
                    ') is not between zero and one: (xE - x)/h = ' + str((xE - x)/self.h)
                raise customError(errorMessage)
            else:
                self.internalPoints[(xLabel, yLabel)].fE = (xE - x)/self.h

            # Note that in the following we use -0.000001 and 1.000001 instead of
            # 0 and 1, respectively, to avoid problems with very small fractions.
            # For example, if a fraction exceeds one by 2.22e-16 the strict
            # condition (x - xW)/self.h > 1 would fail the check and the code
            # would return an error even though the mesh is valid.
            if (x - xW)/self.h < -0.000001 or (x - xW)/self.h > 1.000001:
                errorMessage = '**Error! (x - xW)/h for the point with label (' + str(xLabel) + ',' + str(yLabel) + \
                    ') and coordinate (' + str(x) + ',' + str(y) + \
                    ') is not between zero and one: (x - xW)/h = ' + str((x - xW)/self.h)
                raise customError(errorMessage)
            else:
                self.internalPoints[(xLabel, yLabel)].fW = (x - xW)/self.h

            if (yN - y)/self.h < -0.000001 or (yN - y)/self.h > 1.000001:
                errorMessage = '**Error! (yN - y)/h for the point with label (' + str(xLabel) + ',' + str(yLabel) + \
                    ') and coordinate (' + str(x) + ',' + str(y) + \
                    ') is not between zero and one: (yN - y)/h = ' + str((yN - y)/self.h)
                raise customError(errorMessage)
            else:
                self.internalPoints[(xLabel, yLabel)].fN = (yN - y)/self.h

            if (y - yS)/self.h < -0.000001 or (y - yS)/self.h > 1.000001:
                errorMessage = '**Error! (y - yS)/h for the point with label (' + str(xLabel) + ',' + str(yLabel) + \
                    ') and coordinate (' + str(x) + ',' + str(y) + \
                    ') is not between zero and one: (y - yS)/h = ' + str((y - yS)/self.h)
                raise customError(errorMessage)
            else:
                self.internalPoints[(xLabel, yLabel)].fS = (y - yS)/self.h

            # Calculate the coefficients required to compute the Laplacian
            self.internalPoints[(xLabel, yLabel)].LapCoeffCalc()
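For reference, fractions like fE, fW, fN and fS typically feed a non-uniform five-point Laplacian stencil. A minimal sketch of the standard Shortley-Weller weights is given below; this is a generic textbook formula, not necessarily the exact LapCoeffCalc used by the class above:

    def shortley_weller_coeffs(fE, fW, fN, fS, h):
        # Five-point Laplacian weights on a non-uniform cross stencil
        # (Shortley-Weller): hE = fE*h is the distance to the east neighbour, etc.
        hE, hW, hN, hS = fE * h, fW * h, fN * h, fS * h
        cE = 2.0 / (hE * (hE + hW))
        cW = 2.0 / (hW * (hE + hW))
        cN = 2.0 / (hN * (hN + hS))
        cS = 2.0 / (hS * (hN + hS))
        cP = -2.0 / (hE * hW) - 2.0 / (hN * hS)
        return cE, cW, cN, cS, cP

    # On a uniform interior point (all fractions 1) this reduces to the
    # familiar (1, 1, 1, 1, -4) / h^2 stencil:
    print(shortley_weller_coeffs(1.0, 1.0, 1.0, 1.0, 0.1))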
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_mesh_laplacian(mesh, weights=None, fem_b=None, lap_type=\"conformal\"):\n print(\" Computing Laplacian\")\n if weights is None:\n (weights, fem_b) = compute_mesh_weights(mesh, weight_type=lap_type)\n\n if lap_type == \"fem\":\n weights.data = weights.data / 2\n\n N = weights.shape[0]\n sB = fem_b.sum(axis=0)\n diaB = sparse.dia_matrix((sB, 0), shape=(N, N))\n B = sparse.lil_matrix(diaB + fem_b)\n s = weights.sum(axis=0)\n dia = sparse.dia_matrix((s, 0), shape=(N, N))\n L = sparse.lil_matrix(dia - weights)\n\n # if symmetrize == 1 & & normalize == 0\n # L = diag(sum(W, 2)) - W;\n # elseif\n # symmetrize == 1 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1 / 2)) * W * diag(\n # sum(W, 2). ^ (-1 / 2));\n # elseif\n # symmetrize == 0 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1)) * W;\n\n li = np.hstack(L.data)\n print(\" -nb Nan in Laplacian : \", len(np.where(np.isnan(li))[0]))\n print(\" -nb Inf in Laplacian : \", len(np.where(np.isinf(li))[0]))\n\n return L, B", "def efSolver(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n\n #x-component#\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n\n #y-component\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n #z-component\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)", "def _create_coefficients(self) -> npt.NDArray[np.complex_ | np.float_]:\n _logger.info(\n f\"Slepian eigenvalue {self.rank}: \"\n f\"{self.mesh_slepian.slepian_eigenvalues[self.rank]:e}\",\n )\n s_p_i = self.mesh_slepian.slepian_functions[self.rank]\n return sleplet.slepian_methods.slepian_mesh_forward(\n self.mesh_slepian,\n u_i=s_p_i,\n )", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 
4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def make_cp_le_forcing_vec_SS(cons_pot_mesh, lin_geo_mesh, u_d, f, l, mu):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == lin_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n\n x_c = lin_geo_mesh.get_centroid()\n c_0 = 1. / (4. * np.pi)\n\n # make Power and Miranda supplementary flow vector\n f_s = f / (-8. * np.pi * mu) # the script F seen in Pozrikidis\n l_s = l / (-8. 
* np.pi * mu) # the script L seen in Pozrikidis\n v_s = np.empty(3 * num_faces)\n for src_num in range(num_faces):\n node = cons_pot_mesh.get_node(src_num)\n v_s[(3 * src_num) : (3 * src_num + 3)] = np.einsum(\n \"il,l->i\", geo.stokeslet(node, x_c), f_s\n ) + np.einsum(\n \"il,l->i\", geo.rotlet(node, x_c), l_s\n )\n c_s = c_0 * (u_d - v_s) # script C term from Pozrikidis\n fv = np.copy(c_s) # must copy\n\n # make integral of c_s dotted with normal vector term\n S_D = lin_geo_mesh.get_surface_area()\n for face_num in range(num_faces):\n face_n = lin_geo_mesh.get_normal(face_num)\n face_hs = lin_geo_mesh.get_hs(face_num)\n for src_num in range(num_faces):\n src_n = lin_geo_mesh.get_normal(src_num)\n # setting c_s as constant over element\n j = 3 * face_num\n k = 3 * src_num\n sub_vec = src_n * np.dot(c_s[j : j+3], face_n) * face_hs * 0.5\n fv[k : k+3] += (-1. / (2. * S_D)) * sub_vec\n return fv", "def surface(self):\n # return sum(np.outer(basis_function, control_point) for basis_function, control_point in zip(self.basis_1, self.basis)).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) + np.outer(basis_function_2, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n\n # x = np.zeros_like(self.xi_1_mesh)\n # y = np.zeros_like(self.xi_1_mesh)\n # z = np.zeros_like(self.xi_1_mesh)\n xyz = np.zeros((*self.xi_1_mesh.shape, 3))\n for (i, basis_function_i), (j, basis_function_j) in itertools.product(enumerate(self.basis_1), enumerate(self.basis_2)):\n print(i, basis_function_i)\n print(j, basis_function_j)\n print(self.control_net[i, j])\n # b1, b2 = np.meshgrid(basis_function_i, basis_function_j, indexing = 'ij')\n control_x, control_y, control_z = self.control_net[i, j]\n # print(b1.shape, b2.shape, np.array(self.control_net[i, j]).shape)\n # print((b1 * b2).shape)\n # z += np.outer(b1 * b2, self.control_net[i, j])\n # print(np.shape(z))\n print(np.outer(basis_function_i, basis_function_j))\n # x += np.outer(basis_function_i, basis_function_j) * control_x\n # y += np.outer(basis_function_i, basis_function_j) * control_y\n # z += np.outer(basis_function_i, basis_function_j) * control_z\n print(np.outer(basis_function_i, basis_function_j).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), self.control_net[i, j]).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), np.array(self.control_net[i, j])).shape)\n r = np.einsum('i,j,k->ijk', basis_function_i, basis_function_j, np.array(self.control_net[i, j]))\n print(r.shape)\n xyz += r\n\n # print(x, y, z)\n\n # return x, y, z\n return xyz", "def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 = int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * 
dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n Fout._IsGauss=False\n return Fout", "def minkowskiFunctionals(X):\n F = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])\n XF = signal.convolve2d(X, F, mode='same')\n edges = np.arange(0, 17, 1)\n h, edges = np.histogram(XF[:], bins=edges)\n f_intra = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]\n e_intra = [0, 2, 1, 2, 1, 2, 2, 2, 0, 2, 1, 2, 1, 2, 2, 2]\n v_intra = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n EulerNb8 = np.sum(h*v_intra - h*e_intra + h*f_intra)\n f_inter = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n e_inter = [0, 0, 0, 1, 0, 1, 0, 2, 0, 0, 0, 1, 0, 1, 0, 2]\n v_inter = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]\n EulerNb4 = np.sum(h*v_inter - h*e_inter + h*f_inter)\n Area = sum(h*f_intra)\n Perimeter = sum(-4*h*f_intra + 2*h*e_intra)\n\n return Area, Perimeter, EulerNb8, EulerNb4", "def compute_mesh_eigenfunctions(self, mesh, star0, star1, bdry=False):\n nb = len(mesh)\n\n inputs = []\n for m, s0, s1 in zip(mesh, star0, star1):\n d = m['int_d01']\n if bdry:\n d = scipy.sparse.vstack([d, m['bdry_d01']])\n inputs.extend([s0, s1, d])\n\n eigenvalues, eigenvectors = [], []\n outputs = self.hodgefunc(nb, self.num_eigenvectors,\n self.num_extra_eigenvectors, *inputs)\n for i in range(nb):\n eigenvalues.append(outputs[2*i])\n eigenvectors.append(outputs[2*i+1])\n\n return eigenvalues, eigenvectors", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n 
FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... 
= b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def calculate_fwhm(self, surface, xy_data, PSF_window, N_points, spaxel_scale, wavelength, mode='diffraction'):\n\n start = time()\n # Calculate the Geometric PSF\n x, y = xy_data[:, 0], xy_data[:, 1]\n cent_x, cent_y = np.mean(x), np.mean(y)\n\n # Estimate the Geometric PSF using Kernel Density Estimation. The XY raytrace results are random samples\n # drawn from a probability distribution, the Geometric PSF. KDE estimates that distribution.\n # The main parameter of interest is the 'bandwidth' which defines the width of the kernel that KDE uses to\n # estimate the distribution. A narrower kernel will give a GeoPSF with finer structure; too wide a kernel will\n # just wash away the structure. 
We found that a bandwidth equal to the standard deviation of the raytrace data\n # works well\n std_x, std_y = np.std(x), np.std(y)\n bandwidth = min(std_x, std_y)\n kde = KernelDensity(kernel='gaussian', bandwidth=1.0*bandwidth).fit(xy_data)\n\n # define a grid to compute the PSF\n xmin, xmax = cent_x - PSF_window/2/1000, cent_x + PSF_window/2/1000\n ymin, ymax = cent_y - PSF_window/2/1000, cent_y + PSF_window/2/1000\n x_grid = np.linspace(xmin, xmax, N_points)\n y_grid = np.linspace(ymin, ymax, N_points)\n xx_grid, yy_grid = np.meshgrid(x_grid, y_grid)\n xy_grid = np.vstack([xx_grid.ravel(), yy_grid.ravel()]).T\n log_scores = kde.score_samples(xy_grid)\n\n psf_geo = np.exp(log_scores)\n psf_geo /= np.max(psf_geo)\n psf_geo = psf_geo.reshape(xx_grid.shape)\n\n time_geopsf = time() - start\n # print(\"Time to estimate GeoPSF: %.3f sec\" % time_geo)\n\n if mode == \"diffraction\":\n start = time()\n\n psf_diffr = diffraction.add_diffraction(surface=surface, psf_geo=psf_geo, PSF_window=PSF_window,\n scale_mas=spaxel_scale, wavelength=wavelength)\n time_diffpsf = time() - start\n # print(\"Time to add Diffraction: %.3f sec\" % time_diffpsf)\n\n # Fit the PSF to a 2D Gaussian\n start = time()\n guess_x = PSF_window / 2 / 1000\n fwhm_x, fwhm_y = diffraction.fit_psf_to_gaussian(xx=xx_grid, yy=yy_grid, psf_data=psf_diffr,\n x0=cent_x, y0=cent_y, sigmax0=guess_x, sigmay0=guess_x)\n psf_result = psf_diffr\n\n elif mode == \"geometric\":\n\n start = time()\n guess_x = PSF_window / 2 / 1000\n fwhm_x, fwhm_y = diffraction.fit_psf_to_gaussian(xx=xx_grid, yy=yy_grid, psf_data=psf_geo,\n x0=cent_x, y0=cent_y, sigmax0=guess_x, sigmay0=guess_x)\n psf_result = psf_geo\n\n # fig, (ax1, ax2) = plt.subplots(1, 2)\n # img1 = ax1.imshow(psf_geo, extent=[xmin, xmax, ymin, ymax], cmap='plasma', origin='lower')\n # ax1.scatter(x, y, s=1, color='white', alpha=0.5)\n # plt.colorbar(img1, ax=ax1, orientation='horizontal')\n # ax1.set_xlabel(r'X [mm]')\n # ax1.set_ylabel(r'Y [mm]')\n # ax1.set_title(r'Geometric PSF estimate | Surface: %s' % surface)\n #\n # ax2.plot(x_grid, psf_geo[N_points // 2])\n # xbins, bins, p = ax2.hist(x, bins=np.linspace(xmin, xmax, N_points), density=True)\n # for item in p:\n # item.set_height(item.get_height() / np.max(xbins))\n # ax2.set_ylim([0, 1])\n # plt.show()\n\n time_gauss = time() - start\n\n # print('FWHM time: %.3f sec for GeoPSF estimate:' % time_geopsf)\n # print('FWHM time: %.3f sec for DiffPSF convolution:' % time_diffpsf)\n # print('FWHM time: %.3f sec for Gaussian fit:' % time_gauss)\n\n #\n # img2 = ax2.imshow(psf_diffr, extent=[xmin, xmax, ymin, ymax], cmap='plasma', origin='lower')\n # plt.colorbar(img2, ax=ax2, orientation='horizontal')\n # ax2.set_xlabel(r'X [mm]')\n # ax2.set_ylabel(r'Y [mm]')\n # if surface == 'DET':\n # ax2.set_title(r'Diffr. PSF | %.3f microns | %.1f mas | FWHM_x: %.1f $\\mu$m' % (wavelength, spaxel_scale, fwhm_x))\n # elif surface == 'IS':\n # ax2.set_title(r'Diffr. 
PSF | %.3f microns | %.1f mas | FWHM_y: %.1f $\\mu$m' % (wavelength, spaxel_scale, fwhm_y))\n\n return fwhm_x, fwhm_y, psf_result", "def calc_xi(self):\n\t\n\tk_dot_x = self.k[0]*self.x[0,:,:] + self.k[1]*self.x[1,:,:] + self.k[2]*self.x[2,:,:]\n\n\tself.xi = self.t.reshape((1,self.N)) - k_dot_x/l.Clight\n\n\treturn", "def _field_Fresnel(z, field, dx, lam, dtype, usepyFFTW):\n \n \"\"\" *************************************************************\n Major differences to Cpp based LP version:\n - dx =siz/N instead of dx=siz/(N-1), more consistent with physics \n and rest of LP package\n - fftw DLL uses no normalization, numpy uses 1/N on ifft -> omitted\n factor of 1/(2*N)**2 in final calc before return\n - bug in Cpp version: did not touch top row/col, now we extract one\n more row/col to fill entire field. No errors noticed with the new\n method so far\n ************************************************************* \"\"\"\n _using_pyfftw = False # determined if loading is successful \n if usepyFFTW or _USE_PYFFTW:\n try:\n import pyfftw as _pyfftw\n from pyfftw.interfaces.numpy_fft import fft2 as _fft2\n from pyfftw.interfaces.numpy_fft import ifft2 as _ifft2\n _fftargs = {'planner_effort': 'FFTW_ESTIMATE',\n 'overwrite_input': True,\n 'threads': -1} #<0 means use multiprocessing.cpu_count()\n _using_pyfftw = True \n except ImportError:\n #import warnings\n #warnings.warn(_WARNING)\n _WARNING = '\\n**************************** WARNING ***********************\\n'\\\n +'In the Fresnel command you required FFT with the pyFFTW package.\\n'\\\n +'or _USE_PYFFTW = True in your config.py file.\\n'\\\n +'However LightPipes cannot import pyFFTW because it is not installed.\\n'\\\n +'Falling back to numpy.fft.\\n'\\\n +'(Try to) install pyFFTW on your computer for faster performance.\\n'\\\n +'Enter at a terminal prompt: python -m pip install pyfftw.\\n'\\\n +'Or reinstall LightPipes with the option pyfftw\\n'\\\n +'Enter: python -m pip install lightpipes[pyfftw]\\n\\n'\\\n +'*************************************************************'\n print(_WARNING)\n if not _using_pyfftw:\n from numpy.fft import fft2 as _fft2\n from numpy.fft import ifft2 as _ifft2\n _fftargs = {}\n tictoc.tic()\n N = field.shape[0] #assert square\n \n legacy = True #switch on to numerically compare oldLP/new results\n if legacy:\n kz = 2.*3.141592654/lam * z\n siz = N*dx\n dx = siz/(N-1) #like old Cpp code, even though unlogical\n else:\n kz = 2*_np.pi/lam*z\n \n \n cokz = _np.cos(kz)\n sikz = _np.sin(kz)\n \n No2 = int(N/2) #\"N over 2\"\n \"\"\"The following section contains a lot of uses which boil down to\n 2*No2. For even N, this is N. For odd N, this is NOT redundant:\n 2*No2 is N-1 for odd N, therefore sampling an even subset of the\n field instead of the whole field. Necessary for symmetry of first\n step involving Fresnel integral calc.\n \"\"\"\n if _using_pyfftw:\n in_outF = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n in_outK = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n else:\n in_outF = _np.zeros((2*N, 2*N),dtype=dtype)\n in_outK = _np.zeros((2*N, 2*N),dtype=dtype)\n \n \"\"\"Our grid is zero-centered, i.e. the 0 coordiante (beam axis) is\n not at field[0,0], but field[No2, No2]. 
The FFT however is implemented\n such that the frequency 0 will be the first element of the output array,\n and it also expects the input to have the 0 in the corner.\n For the correct handling, an fftshift is necessary before *and* after\n the FFT/IFFT:\n X = fftshift(fft(ifftshift(x))) # correct magnitude and phase\n x = fftshift(ifft(ifftshift(X))) # correct magnitude and phase\n X = fftshift(fft(x)) # correct magnitude but wrong phase !\n x = fftshift(ifft(X)) # correct magnitude but wrong phase !\n A numerically faster way to achieve the same result is by multiplying\n with an alternating phase factor as done below.\n Speed for N=2000 was ~0.4s for a double fftshift and ~0.1s for a double\n phase multiplication -> use the phase factor approach (iiij).\n \"\"\"\n # Create the sign-flip pattern for largest use case and \n # reference smaller grids with a view to the same data for\n # memory saving.\n ii2N = _np.ones((2*N),dtype=float)\n ii2N[1::2] = -1 #alternating pattern +,-,+,-,+,-,...\n iiij2N = _np.outer(ii2N, ii2N)\n iiij2No2 = iiij2N[:2*No2,:2*No2] #slice to size used below\n iiijN = iiij2N[:N, :N]\n\n RR = _np.sqrt(1/(2*lam*z))*dx*2\n io = _np.arange(0, (2*No2)+1) #add one extra to stride fresnel integrals\n R1 = RR*(io - No2)\n fs, fc = _fresnel(R1)\n fss = _np.outer(fs, fs) # out[i, j] = a[i] * b[j]\n fsc = _np.outer(fs, fc)\n fcs = _np.outer(fc, fs)\n fcc = _np.outer(fc, fc)\n \n \"\"\"Old notation (0.26-0.33s):\n temp_re = (a + b + c - d + ...)\n # numpy func add takes 2 operands A, B only\n # -> each operation needs to create a new temporary array, i.e.\n # ((((a+b)+c)+d)+...)\n # since python does not optimize to += here (at least is seems)\n New notation (0.14-0.16s):\n temp_re = (a + b) #operation with 2 operands\n temp_re += c\n temp_re -= d\n ...\n Wrong notation:\n temp_re = a #copy reference to array a\n temp_re += b\n ...\n # changing `a` in-place, re-using `a` will give corrupted\n # result\n \"\"\"\n temp_re = (fsc[1:, 1:] #s[i+1]c[j+1]\n + fcs[1:, 1:]) #c[+1]s[+1]\n temp_re -= fsc[:-1, 1:] #-scp [p=+1, without letter =+0]\n temp_re -= fcs[:-1, 1:] #-csp\n temp_re -= fsc[1:, :-1] #-spc\n temp_re -= fcs[1:, :-1] #-cps\n temp_re += fsc[:-1, :-1] #sc\n temp_re += fcs[:-1, :-1] #cs\n \n temp_im = (-fcc[1:, 1:] #-cpcp\n + fss[1:, 1:]) # +spsp\n temp_im += fcc[:-1, 1:] # +ccp\n temp_im -= fss[:-1, 1:] # -ssp\n temp_im += fcc[1:, :-1] # +cpc\n temp_im -= fss[1:, :-1] # -sps\n temp_im -= fcc[:-1, :-1] # -cc\n temp_im += fss[:-1, :-1]# +ss\n \n temp_K = 1j * temp_im # a * b creates copy and casts to complex\n temp_K += temp_re\n temp_K *= iiij2No2\n temp_K *= 0.5\n in_outK[(N-No2):(N+No2), (N-No2):(N+No2)] = temp_K\n \n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] \\\n = field[(N-2*No2):N,(N-2*No2):N] #cutting off field if N odd (!)\n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] *= iiij2No2\n \n tictoc.tic()\n in_outK = _fft2(in_outK, **_fftargs)\n in_outF = _fft2(in_outF, **_fftargs)\n t_fft1 = tictoc.toc()\n \n in_outF *= in_outK\n \n in_outF *= iiij2N\n tictoc.tic()\n in_outF = _ifft2(in_outF, **_fftargs)\n t_fft2 = tictoc.toc()\n #TODO check normalization if USE_PYFFTW\n \n Ftemp = (in_outF[No2:N+No2, No2:N+No2]\n - in_outF[No2-1:N+No2-1, No2:N+No2])\n Ftemp += in_outF[No2-1:N+No2-1, No2-1:N+No2-1]\n Ftemp -= in_outF[No2:N+No2, No2-1:N+No2-1]\n comp = complex(cokz, sikz)\n Ftemp *= 0.25 * comp\n Ftemp *= iiijN\n field = Ftemp #reassign without data copy\n ttotal = tictoc.toc()\n t_fft = t_fft1 + t_fft2\n t_outside = ttotal - t_fft\n debug_time = False\n if debug_time:\n 
print('Time total = fft + rest: {:.2f}={:.2f}+{:.2f}'.format(\n ttotal, t_fft, t_outside))\n return field", "def ldfe(n=3):\n\n # We will use the following coordinate system.\n #\n # | z, top\n # |\n # |\n # |\n # o------- x, right\n # /\n # /\n # /\n # / y, front\n\n # Cube inside the octant that touches the sphere at\n a = 1 / sqrt(3)\n\n # We have three important faces of the cube.\n # Start with the front face and refine it in N segments.\n x = linspace(0, a, n + 1)\n z = linspace(0, a, n + 1)\n\n # Then delta Omega_ij = [x_i,x_i+1] x [z_j,z_j+1]\n # Now go through every cell.\n points = zeros((1 * 1 * 4 * n * n, 3)) # 1/3 of the octants\n weights = zeros(1 * 1 * 4 * n * n)\n square = zeros(1 * 1 * 4 * n * n)\n counter = 0\n rhos0 = 0.1 * ones(4)\n for i in range(n):\n for j in range(n):\n x0, x1, z0, z1 = x[i], x[i + 1], z[j], z[j + 1]\n\n omegas = computeomegas(x0, x1, z0, z1)\n areas = computeareas(omegas, x0, x1, z0, z1)\n print(\"\\n\\nOptimiztation for:\")\n print(\"Domain:\")\n print([x0, x1, z0, z1])\n\n rhos = optimizeposition_leastsquares(areas, omegas, x0, x1, z0, z1,\n rhos0)\n rhos0 = rhos # take the optimal parameter of this cell as the starting value for the optimizer in the next cell\n dummy = rand()\n for k in range(4):\n points[counter, :] = project(omegas[k](rhos[k]))\n weights[counter] = areas[k]\n square[counter] = dummy\n counter += 1\n scatterplot(points, weights, square)\n return points, weights", "def calc_Ls(self, x_surface, geom):\n\n return np.zeros((self.n_wl,))", "def getSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n ys = np.array(self.XYProjections)[:,1]\n zs = np.array(self.XZProjections)[:,1]\n\n L = xs[-1] - xs[0]\n self.L = L\n xis = (xs - xs[0]) / L\n\n errorValue = lambda x,y,A: y - np.dot(A, x)\n a_init = np.array([1] * 4)\n\n # Calculate the derivation equation on x-y plane\n # Get the optimal parameters using least squre error method\n a1 = sp.optimize.leastsq(errorValue, a_init, args=(ys, self._H(xis, L)))[0]\n self.alpha_xyPlane = a1\n \n # Derivation\n xi = sy.symbols('xi')\n self.u_xyPlane = (self._H(xi, L, ifsymbol=True) * a1).sum()\n \n # Then calculate the derivation equation on x-z plane\n a2 = sp.optimize.leastsq(errorValue, a_init, args=(zs, self._H(xis, L)))[0]\n self.alpha_xzPlane = a2\n self.u_xzPlane = (self._H(xi, L, ifsymbol=True) * a2).sum()", "def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n 
self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def get_F(self,neighbours,vs):\n J = self.J_large[:self.n_c,:self.n_c]\n # J[:self.n_C,:self.n_C] = self.J\n # J[self.n_C:,self.n_C:] = 0\n J_CW = J[self.tris, roll_forward(self.tris)]\n J_CCW = J[self.tris, roll_reverse(self.tris)]\n F = get_F(vs, neighbours, self.tris, self.CV_matrix, self.n_v, self.n_c, self.L, J_CW, J_CCW, self.A, self.P, self.Cents, self.kappa_A, self.kappa_P, self.A0, self.P0,self.n_C,self.kappa_B,self.l_b0)\n return F", "def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def intern_F(self):\n if self.A is None:\n def Fx(x,y):\n if self.hx is None:\n fx = self.gradf(x)\n self.Fz = fx, None, None\n return fx, None, None\n 
else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n self.Fz = fx, fy, None\n return fx, fy, None\n else:\n def Fx(x,y,u):\n if self.hx is None:\n fx = self.gradf(x)\n fu = self.b-self.A@x\n self.Fz = fx, None, fu\n return fx, None, fu\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n fu = self.b-self.A@x\n self.Fz = fx, fy, fu\n return fx, fy, fu\n return Fx", "def local_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG", "def computesWingsMeshPoints(self):\n self.userAskedNNodesWings = np.zeros(self.nWings)\n self.ws_me_points = []\n self.ws_ma_points = []\n self.ws_me_distances = []\n self.ws_sg_lengths = []\n self.ws_ma_distance = []\n self.ws_me_pointsName = []\n self.ws_me_pointsInitArea = []\n for i in range(self.nWings):\n\n # Basic wing input check\n self.userAskedNNodesWings[i] = self.settings[\"wing\"+str(i+1)][\"FEM\"][\"nodesFEM\"]\n w_m_N_nodes = int(self.userAskedNNodesWings[i])\n if w_m_N_nodes < 2:\n logger.error(\"Not enough points for wing\"+str(i+1)+\" (min 2)\")\n sys.exit()\n\n logger.debug(\"Number of wing nodes asked: \"+str(w_m_N_nodes))\n # distance from leading edge to the elastic axis\n ##################################################################\n # Be very careful with what verion of TiGl you are using! 
It looks\n # like the order is inverted in last version at least I have\n # eperianced some issues between home and CFSE computer.\n ##################################################################\n # xsiEl = 1 - self.settings['wing' + str(i+1)]['elasticAxis']\n xsiEl = self.settings['wing' + str(i+1)]['elasticAxis']\n \n # distance between the mass axis and the elastic axis\n xsiMa = self.settings['wing' + str(i+1)]['massAxis']\n logger.debug(\"Wing\"+str(i+1)+\" Elastic center is: \"+str(xsiEl))\n wingIndex = i+1\n\n # Gets the number of segment and sections for each wing\n w_N_sg = self.tigl.wingGetSegmentCount(i+1)\n w_N_sc = self.tigl.wingGetSectionCount(i+1)\n logger.debug(\"Wing\"+str(i+1)+\" has \"+str(w_N_sg)+\" segments\")\n logger.debug(\"Wing\"+str(i+1)+\" has \"+str(w_N_sc)+\" sections\")\n if w_m_N_nodes < w_N_sc:\n logger.warning(\"Wing mesh underdetermined, less points than actual CPACS sections\")\n\n # Gets each segments starting and ending points\n w_sg_points = np.empty((w_N_sg+1,3))\n for j in range(w_N_sg):\n w_sg_points[j] = self.getWingCamberLinePoint(wingIndex,j+1,0,xsiEl)\n w_sg_points[-1] = self.getWingCamberLinePoint(wingIndex,j+1,1,xsiEl)\n logger.debug(\"Wing\"+str(wingIndex)+\" segment points:\\n\"+str(w_sg_points))\n\n # Gets each segments length\n w_sg_length = np.empty(w_N_sg)\n w_sg_relativePosition = np.empty(w_N_sg+1)\n w_length = 0\n for j in range(w_N_sg):\n w_sg_relativePosition[j] = w_length\n length = np.linalg.norm(w_sg_points[j] - w_sg_points[j+1])\n w_sg_length[j] = length\n w_length += length\n w_sg_relativePosition[-1] = w_length\n logger.debug(\"Wing\"+str(wingIndex)+\" segments lengths are:\\n\"+str(w_sg_length))\n logger.debug(\"Wing\"+str(wingIndex)+\" segments relative positions are:\\n\"+str(w_sg_relativePosition))\n logger.debug(\"Wing\"+str(wingIndex)+\" length is:\"+str(w_length))\n\n # Computes mesh relative points\n w_m_relativePoints = np.linspace(0, w_length, w_m_N_nodes)\n logger.debug(\"Wing\"+str(wingIndex)+\" relative mesh points:\\n\"+str(w_m_relativePoints))\n\n # If the user askes more points that there sections in the CPACS\n # file definitions the program automatically changes the position\n # to the closest known point to the center of the section. 
This\n # features ensures that the simulations will be made with maximal\n # fidelity to the definintion.\n #\n # WARNING:\n # After some testing it looks like this feature induces errors\n # instead of erasing them.\n #\n # logger.debug(\"+\"*20)\n # logger.debug(\"wing relative pos:\\n\"+str(w_sg_relativePosition))\n # logger.debug(\"mesh relative pos:\\n\"+str(w_m_relativePoints))\n # if w_N_sc <= w_m_N_nodes:\n # for j in range(w_N_sc):\n # diff = np.abs(w_m_relativePoints - w_sg_relativePosition[j])\n # index = np.argmin(diff)\n # w_m_relativePoints[index] = w_sg_relativePosition[j]\n\n logger.debug(\"mesh relative pos:\\n\"+str(w_m_relativePoints))\n\n # Computes the eta for each segment in order to get the mesh point\n # from tigl\n w_me_points = np.empty((w_m_N_nodes,3))\n w_ma_points = np.empty((w_m_N_nodes,3))\n w_me_distances = np.empty((w_m_N_nodes-1))\n w_ma_distance = np.empty((w_m_N_nodes,3))\n w_me_pointsName = []\n w_me_pointsInitArea = np.empty(w_m_N_nodes)\n for j in range(w_m_N_nodes):\n # finds in which segment the mesh point will be\n relativePosition = w_m_relativePoints[j]\n dist = w_sg_relativePosition - relativePosition\n segmentIndex = np.argmin(np.abs(dist))+1\n # o--x-------o situations\n if dist[segmentIndex-1] < 0:\n case = 1\n eta = w_m_relativePoints[j] - w_sg_relativePosition[segmentIndex-1]\n eta = (eta/w_sg_length[segmentIndex-1])\n # o--x-------o situation\n elif dist[segmentIndex-1] > 0:\n case = 2\n eta = w_sg_relativePosition[segmentIndex-1] - w_m_relativePoints[j]\n segmentIndex = segmentIndex - 1\n eta = 1 - (eta/w_sg_length[segmentIndex-1])\n elif dist[segmentIndex-1] == 0.0 and segmentIndex == 1:\n case = 3\n eta = 0\n elif dist[segmentIndex-1] == 0.0 and segmentIndex != 1:\n case = 4\n eta = 1\n segmentIndex -= 1\n else:\n logger.error(\"Something wrong with CPACS file\")\n sys.exit()\n # logger.debug()\n logger.debug(\"case \"+str(case)+\" eta = \"+str(eta))\n\n # Gets the wing mesh points. 
Theses points will be always on\n # the camber line.\n w_me_points[j] = self.getWingCamberLinePoint(wingIndex,segmentIndex,eta,xsiEl)\n w_ma_points[j] = self.getWingCamberLinePoint(wingIndex,segmentIndex,eta,xsiEl-xsiMa)\n if j > 0:\n length = np.linalg.norm(w_me_points[j] - w_me_points[j-1])\n w_me_distances[j-1] = length\n # Distance from elastic axis\n w_ma_distance[j] = w_me_points[j] - w_ma_points[j]\n name = \"w_\"+str(i+1)+\"_n_\"+str(j)\n if self.nFuselage == 0:\n if np.abs(w_me_points[j][1]) < 1e-2:\n name = \"w_n_clamped\"\n w_me_pointsName.append(name)\n # Computes section area\n area = self.computePointSectionArea(wingIndex,segmentIndex,eta,xsiEl)\n w_me_pointsInitArea[j] = area\n \n logger.debug(w_me_points)\n logger.debug(w_ma_points)\n # sys.exit()\n # For reference, in tigl3wrapper.py the symmetry is defined as such:\n #\n # class TiglSymmetryAxis(object):\n # TIGL_NO_SYMMETRY = 0\n # TIGL_X_Y_PLANE = 1\n # TIGL_X_Z_PLANE = 2\n # TIGL_Y_Z_PLANE = 3\n symmetry = self.tigl.wingGetSymmetry(i+1)\n if symmetry > 0:\n w_me_points_copy = np.copy(w_me_points)\n w_ma_points_copy = np.copy(w_ma_points)\n w_ma_distance_copy = np.copy(w_ma_distance)\n w_me_pointsName_copy = w_me_pointsName.copy()\n w_me_pointsInitArea_c = np.copy(w_me_pointsInitArea)\n if symmetry == 1:\n index = 2\n elif symmetry == 2:\n index = 1\n elif symmetry == 3:\n index = 0\n\n # Computes symmetric points\n for k in range(w_m_N_nodes):\n w_me_points_copy[k][index] = - w_me_points[k,index]\n w_ma_points_copy[k][index] = - w_ma_points[k,index]\n w_ma_distance_copy[k][index] = - w_ma_distance[k,index]\n w_me_pointsName_copy[k] = w_me_pointsName_copy[k] + \"sym\"\n # The -1 avoids copying two times the \"same\" point\n w_me_points = np.concatenate((np.flip(w_me_points_copy[1:],axis=0),w_me_points))\n w_ma_points = np.concatenate((np.flip(w_ma_points_copy[1:],axis=0),w_ma_points))\n w_me_distances = np.concatenate((np.flip(w_me_distances), w_me_distances))\n w_ma_distance = np.concatenate((np.flip(w_ma_distance_copy[1:],axis=0), w_ma_distance))\n rev = w_me_pointsName_copy[::-1]\n w_me_pointsName = rev[:-1] + w_me_pointsName\n # logger.debug(w_m_pointsInitArea)\n # logger.debug(np.flip(w_me_pointsInitArea_c))\n w_me_pointsInitArea = np.concatenate((np.flip(w_me_pointsInitArea_c[1:],axis=0),w_me_pointsInitArea))\n\n logger.debug(\"Wing mesh points:\\n\"+str(w_me_points))\n self.ws_me_points.append(w_me_points)\n self.ws_ma_points.append(w_ma_points)\n\n # me_distance is the distance betweent two points of the strcutral\n # mesh size\n self.ws_me_distances.append(w_me_distances)\n # self.ws_sg_lengths.append(w_sg_length)\n\n # mass distance is the distance between the elastic line and the\n # mass line\n self.ws_ma_distance.append(w_ma_distance)\n self.ws_me_pointsInitArea.append(w_me_pointsInitArea)\n self.ws_me_pointsName.append(w_me_pointsName)", "def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = 
self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol", "def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. 
If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)" ]
[ "0.66401815", "0.5937063", "0.59241253", "0.57615346", "0.5728458", "0.5698205", "0.5588096", "0.5559542", "0.55237275", "0.54786545", "0.5427542", "0.54004824", "0.5383396", "0.5380944", "0.5374193", "0.5373815", "0.5343213", "0.53354645", "0.5324629", "0.5318333", "0.5314448", "0.53135115", "0.53125095", "0.5301839", "0.5283879", "0.52819145", "0.52800405", "0.52789396", "0.52624726", "0.52485555" ]
0.6225645
1
This method defines the mesh points for a given layoutType
def run(self): # Dictionaries whose keys are labels of the points in a 2-D grid and values # are an instance of the class meshPoint holding the informaiton about # that mesh point self.boundaryPoints = {} self.internalPoints = {} # Rectangle if self.layoutType.lower() == 'rectangle': # Define the mesh for a rectanglular layout self.defineRectangleLayout() # Circle elif self.layoutType.lower() == 'circle': # Define the mesh for a circular layout self.defineCircleLayout() return [self.internalPoints,self.boundaryPoints]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_kpoints(self):\n kpoints_mesh = KpointsData()\n kpoints_mesh.set_cell_from_structure(self.inputs.structure)\n kpoints_mesh.set_kpoints_mesh_from_density(\n distance=self.ctx.protocol['kpoints_mesh_density'],\n offset=self.ctx.protocol['kpoints_mesh_offset']\n )\n\n self.ctx.kpoints_mesh = kpoints_mesh", "def defineCircleLayout(self):\n # Define a 2-D array representing the position of each mesh point\n self.xPoints = self.frange(0,self.R,self.h)\n self.yPoints = self.frange(0,self.R,self.h)\n\n # Position of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints for j in self.yPoints if (i - self.R)**2 + (j - self.R)**2 < self.R^2] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = xPoints.index(x)\n yLabel = yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Position of the boundary points\n # Find the intersection of each mesh line with the circle\n # For a given vertical mesh line: \n # y = R - sqrt(R^2 - (x-R)^2) & y = R + sqrt(R^2 - (x-R)^2)\n # For a given horizontal mesh line: \n # x = R - sqrt(R^2 - (y-R)^2) & x = R + sqrt(R^2 - (y-R)^2)\n boundary_xyCoord = [(0,self.R),(self.R,0),(self.R,2*self.R),(2*self.R,self.R)] + [(x,self.R - math.sqrt(self.R**2 - (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(x,self.R - math.sqrt(self.R**2 + (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(self.R - math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] + [(self.R + math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n [xLabel,yLabel] = self.findLabel(x,y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def getXYZ(self, layout=None):\n def mesh2d(xlim, ylim):\n xx = [[xlim[0],xlim[1]],\n [xlim[0],xlim[1]]]\n yy = [[ylim[0],ylim[0]],\n [ylim[1],ylim[1]]]\n return xx,yy\n def calc(x, y, z, normal, offset):\n unknown = normal.dot(offset) - normal.vec[0]*x - normal.vec[1]*y - normal.vec[2]*z\n return unknown\n \n xlim = [-1,1]\n ylim = [-1,1]\n zlim = [-1,1]\n n = self.normal\n off = self.offset\n \n if n.vec[2] == 0:\n if n.vec[1] == 0:\n if n.vec[0] == 0:\n raise ValueError(\"Normal vector is zero vector.\")\n else:\n #cannot generate z or y but can x, try generating x for yz mesh\n yy, zz = mesh2d(ylim, zlim)\n xx = [[None,None],[None,None]]\n for i in [0,1]:\n for j in [0,1]:\n xx[i][j] = calc(0, yy[i][j], zz[i][j], n, off)\n else:\n #cannot generate z but can y, try generating y for xz mesh\n xx, zz = mesh2d(xlim, zlim)\n yy = [[None,None],[None,None]]\n for i in [0,1]:\n for j in [0,1]:\n yy[i][j] = calc(xx[i][j], 0, zz[i][j], n, off)\n else:\n #try generating z\n xx, yy = mesh2d(xlim, ylim)\n zz = [[None,None],[None,None]]\n for i in [0,1]:\n for j in [0,1]:\n zz[i][j] = calc(xx[i][j], yy[i][j], 0, n, off)\n return xx, yy, zz", "def _generate_geometry_from_points(self, geometry_type, points):\n if geometry_type == 'line':\n # Only x and y coordinates are considered for line\n geometry = LineString([(x[0], x[1]) for x in points])\n elif geometry_type == 'area':\n # Only x and y coordinates are considered for polygon area\n geometry = Polygon([(x[0], 
x[1]) for x in points])\n else:\n raise NotImplementedError()\n return geometry", "def defineRectangleLayout(self):\n\n #--- Define a 2-D mesh ---\n # Divide x- and y-axis\n self.xPoints = self.frange(0,self.Lx,self.h)\n self.yPoints = self.frange(0,self.Ly,self.h)\n\n # Position (xy-coordinate) of boundary points\n boundary_xyCoord = [(0,j) for j in self.yPoints] + [(self.Lx,j) for j in self.yPoints] + [(i,0) for i in self.xPoints[1:len(self.xPoints)-1]] + [(i,self.Ly) for i in self.xPoints[1:len(self.xPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = self.xPoints.index(x)\n yLabel = self.yPoints.index(y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Position (xy-coordinate) of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints[1:len(self.xPoints)-1] for j in self.yPoints[1:len(self.yPoints)-1]] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = self.xPoints.index(x)\n yLabel = self.yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for i, mesh in enumerate(self.all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n points = vtkPoints()\n for j in range(mesh.channel.size):\n # points.InsertNextPoint([0, 0, 0])\n points.InsertNextPoint(mesh.data[:3, j, 0].tolist())\n\n # Create an array for each triangle\n draw_patch = not mesh.automatic_triangles and not self.force_wireframe\n if draw_patch:\n poly_type = vtkPolygon\n n_ids = 3\n color = self.patch_color[i]\n else:\n poly_type = vtkPolyLine\n n_ids = 4\n color = self.mesh_color\n cells = vtkCellArray()\n\n # Create the polygons\n for j in range(mesh.triangles.shape[1]):\n poly = poly_type()\n poly.GetPointIds().SetNumberOfIds(n_ids) # make a tri\n for k in range(len(mesh.triangles[:, j])):\n poly.GetPointIds().SetId(k, mesh.triangles[k, j])\n if not draw_patch:\n poly.GetPointIds().SetId(3, mesh.triangles[0, j]) # Close the triangle\n cells.InsertNextCell(poly)\n\n poly_data = vtkPolyData()\n poly_data.SetPoints(points)\n if draw_patch:\n poly_data.SetPolys(cells)\n else:\n poly_data.SetLines(cells)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_data)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n self.mesh_actors[i].GetProperty().SetColor(color)\n self.mesh_actors[i].GetProperty().SetOpacity(self.mesh_opacity)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n\n # Update marker position\n self.update_mesh(self.all_meshes)", "def Parallelepiped(self,lower_left_rear_point=(0,0,0), upper_right_front_point=(2,4,10),\n nx=2, ny=4, nz=10, element_type=\"hex\"):\n\n if self.elements is not None and self.points is not 
None:\n self.__reset__()\n\n if element_type != \"tet\" and element_type != \"hex\":\n raise ValueError(\"Can only generate parallelepiped mesh using tetrahedrals or hexahedrals\")\n\n if (lower_left_rear_point[0] > upper_right_front_point[0]) or \\\n (lower_left_rear_point[1] > upper_right_front_point[1]) or \\\n (lower_left_rear_point[2] > upper_right_front_point[2]):\n raise ValueError(\"Incorrect coordinate for lower left rear and upper right front vertices\")\n\n nx, ny, nz = int(nx), int(ny), int(nz)\n if nx <= 0 or ny <= 0 or nz <= 0:\n raise ValueError(\"Number of discretisation cannot be zero or negative: nx={} ny={} nz={}\".format(nx,ny,nz))\n\n\n x=np.linspace(lower_left_rear_point[0],upper_right_front_point[0],nx+1)\n y=np.linspace(lower_left_rear_point[1],upper_right_front_point[1],ny+1)\n z=np.linspace(lower_left_rear_point[2],upper_right_front_point[2],nz+1)\n\n Y,X,Z = np.meshgrid(y,x,z)\n coordinates = np.dstack((X.T.flatten(),Y.T.flatten(),Z.T.flatten()))[0,:,:]\n\n self.element_type = \"hex\"\n self.nelem = int(nx*ny*nz)\n elements = np.zeros((self.nelem,8),dtype=np.int64)\n\n dum_0 = np.arange((nx+1)*ny)\n dum_1 = np.array([(nx+1)*i+nx for i in range(ny)])\n dum_2 = np.delete(dum_0,dum_1)\n col0 = np.array([dum_2+i*(nx+1)*(ny+1) for i in range(nz)]).flatten()\n\n elements[:,0] = col0\n elements[:,1] = col0 + 1\n elements[:,2] = col0 + nx + 2\n elements[:,3] = col0 + nx + 1\n elements[:,4] = col0 + (nx + 1) * (ny + 1)\n elements[:,5] = col0 + (nx + 1) * (ny + 1) + 1\n elements[:,6] = col0 + (nx + 1) * (ny + 1) + nx + 2\n elements[:,7] = col0 + (nx + 1) * (ny + 1) + nx + 1\n\n self.elements = elements\n self.points = coordinates\n self.nnode = self.points.shape[0]\n\n self.GetBoundaryFacesHex()\n self.GetBoundaryEdgesHex()\n\n if element_type == \"tet\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertHexesToTets()\n sys.stdout = sys.__stdout__", "def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()", "def addPoints(self):\n numDims = len(self.relation.fieldNames) - 1\n datasets = self.relation.getScaledDatasets()\n for ds in datasets:\n points = []\n lines = []\n for i in range(numDims):\n p = PlotPoint(self, ds[i], ds[-1])\n p.setParentItem(self.axes[i])\n points.append(p)\n\n if 0 < i:\n lines.append(PlotLine(self, points[i - 1], p))\n if i == numDims - 1:\n lines.append(PlotLine(self, p, points[0]))\n\n group = self.scene().createItemGroup(lines)\n group.dataClassLabel = points[0].cls\n self.lineGroups.append(group)", "def create_partition(mesh,polygons,enforce_exact=False):", "def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n 
points.append((v1, v1, v4)) # 22\n\n return points", "def add_poly_mesh_arrays_data_to_gl(self, key, fv_indices, points, face_normals, cstype, c, face_colors, vertex_colors):\n n_vertices_max = len(fv_indices[0])\n\n data_mesh_points_list = np.array([])\n data_mesh_normals_list = np.array([])\n data_mesh_colors_list = np.array([])\n n_all_vertices = 0\n\n max_iter = n_vertices_max - 1\n\n for corner_idx in range(1, max_iter):\n if n_vertices_max > 3:\n existing_triangles = fv_indices[:, corner_idx + 1] != -1\n\n if True not in existing_triangles:\n continue\n\n fv_indices_to_draw_all_vertices = fv_indices[existing_triangles]\n fv_indices_to_draw = fv_indices_to_draw_all_vertices[:, [0, corner_idx, corner_idx + 1]]\n face_normals_to_draw = face_normals[existing_triangles]\n else:\n fv_indices_to_draw = fv_indices\n face_normals_to_draw = face_normals\n\n fv_indices_flattened = fv_indices_to_draw.flatten()\n n_all_vertices += len(fv_indices_flattened)\n\n n_faces = len(fv_indices_to_draw)\n\n vertexData = self.createVertexData(fv_indices_flattened, points)\n\n normalData = self.createNormaldata(face_normals_to_draw)\n\n if cstype == 0:\n colorData = self.createConstantColorData(c, n_faces)\n elif cstype == 1:\n colorData = self.createFaceColorData(face_colors)\n elif cstype == 2:\n colorData = self.createVertexColorData(vertex_colors, fv_indices_flattened)\n\n if self._showBack:\n fv_indices_flattened_reversed = fv_indices_flattened[::-1]\n n_all_vertices += len(fv_indices_flattened_reversed)\n\n reversed_mesh_points = self.createVertexData(fv_indices_flattened_reversed, points)\n\n reversed_normals = self.createNormaldata(-face_normals_to_draw[::-1])\n\n if cstype == 0:\n reversed_colors = colorData\n elif cstype == 1:\n reversed_colors = self.createFaceColorData(face_colors[::-1])\n elif cstype == 2:\n reversed_colors = self.createVertexColorData(vertex_colors, fv_indices_flattened_reversed)\n\n data_mesh_points_list = np.concatenate([data_mesh_points_list, vertexData, reversed_mesh_points])\n data_mesh_normals_list = np.concatenate([data_mesh_normals_list, normalData, reversed_normals])\n data_mesh_colors_list = np.concatenate([data_mesh_colors_list, colorData, reversed_colors])\n else:\n data_mesh_points_list = np.concatenate([data_mesh_points_list, vertexData])\n data_mesh_normals_list = np.concatenate([data_mesh_normals_list, normalData])\n data_mesh_colors_list = np.concatenate([data_mesh_colors_list, colorData])\n\n vertex_data = np.array(data_mesh_points_list, dtype=GLHelpFun.numpydatatype(GLDataType.FLOAT))\n normal_data = np.array(data_mesh_normals_list, dtype=GLHelpFun.numpydatatype(GLDataType.FLOAT))\n color_data = np.array(data_mesh_colors_list, dtype=GLHelpFun.numpydatatype(GLDataType.FLOAT))\n\n self.setlistdata_f3xyzf3nf4rgba(key, vertex_data, normal_data, color_data)\n self.setVertexCounter_byNum(key, n_all_vertices)\n return", "def registerVertices(self,vl):\n self.set('patchmesh.vertices',FuzzList(vl))", "def _create_layout(layout_type, xlabel, ylabel):\n\n base_layout = {\n \"font\": {\"family\": \"Raleway\"},\n \"hovermode\": \"closest\",\n \"margin\": {\"r\": 20, \"t\": 0, \"l\": 0, \"b\": 0},\n \"showlegend\": False,\n }\n\n if layout_type == \"scatter3d\":\n base_layout[\"scene\"] = {\n \"xaxis\": _create_axis(axis_type=\"3d\", title=xlabel),\n \"yaxis\": _create_axis(axis_type=\"3d\", title=ylabel),\n \"zaxis\": _create_axis(axis_type=\"3d\", title=xlabel, variation=\"log\"),\n \"camera\": {\n \"up\": {\"x\": 0, \"y\": 0, \"z\": 1},\n \"center\": {\"x\": 0, \"y\": 
0, \"z\": 0},\n \"eye\": {\"x\": 0.08, \"y\": 2.2, \"z\": 0.08},\n },\n }\n\n elif layout_type == \"histogram2d\":\n base_layout[\"xaxis\"] = _black_out_axis(\n _create_axis(axis_type=\"2d\", title=xlabel)\n )\n base_layout[\"yaxis\"] = _black_out_axis(\n _create_axis(axis_type=\"2d\", title=ylabel)\n )\n base_layout[\"plot_bgcolor\"] = \"black\"\n base_layout[\"paper_bgcolor\"] = \"black\"\n base_layout[\"font\"][\"color\"] = \"white\"\n\n elif layout_type == \"scatter\":\n base_layout[\"xaxis\"] = _create_axis(axis_type=\"2d\", title=xlabel)\n base_layout[\"yaxis\"] = _create_axis(axis_type=\"2d\", title=ylabel)\n base_layout[\"plot_bgcolor\"] = \"rgb(230, 230, 230)\"\n base_layout[\"paper_bgcolor\"] = \"rgb(230, 230, 230)\"\n\n return base_layout", "def assignPointsToShapes(self):\n pointsCopy = self.mission['points'].copy()\n\n while len(pointsCopy):\n shape = []\n self.recursiveAddPointToShape(pointsCopy, [pointsCopy[0]], shape)\n shape.append(shape[0])\n self.mission['shapes'].append(shape)", "def _create(self, creation_type: str = \"Uniform\"):\n if creation_type == \"Uniform\":\n number_of_vectors = comb(\n self.lattice_resolution + self.number_of_objectives - 1,\n self.number_of_objectives - 1,\n exact=True,\n )\n self.number_of_vectors = number_of_vectors\n temp1 = range(1, self.number_of_objectives + self.lattice_resolution)\n temp1 = np.array(list(combinations(temp1, self.number_of_objectives - 1)))\n temp2 = np.array(\n [range(self.number_of_objectives - 1)] * self.number_of_vectors\n )\n temp = temp1 - temp2 - 1\n weight = np.zeros(\n (self.number_of_vectors, self.number_of_objectives), dtype=int\n )\n weight[:, 0] = temp[:, 0]\n for i in range(1, self.number_of_objectives - 1):\n weight[:, i] = temp[:, i] - temp[:, i - 1]\n weight[:, -1] = self.lattice_resolution - temp[:, -1]\n self.values = weight / self.lattice_resolution\n self.values_planar = np.copy(self.values)\n self.normalize()\n return\n elif creation_type == \"Focused\":\n point_set = [[0, 1, -1]] * (self.number_of_objectives - 1)\n # The cartesian product of point_set.\n initial = np.array(list(product(*point_set)))[1:]\n # First element was removed because of the error during normalization.\n initial = normalize(initial)\n initial = np.hstack((initial, np.zeros((initial.shape[0], 1))))\n final = shear(initial, degrees=5)\n # Adding the first element back\n final = np.vstack(([0] * (self.number_of_objectives - 1) + [1], final))\n self.number_of_vectors = final.shape[0]\n self.values = rotate(final[0], self.ref_point, final)\n self.values_planar = np.copy(self.values)\n self.normalize()\n self.add_edge_vectors()\n elif creation_type == \"Sparse_Focused\":\n initial = np.eye(self.number_of_objectives - 1)\n initial = np.vstack((initial, -initial))\n initial = normalize(initial)\n initial = np.hstack((initial, np.zeros((initial.shape[0], 1))))\n final = shear(initial, degrees=5)\n # Adding the first element back\n final = np.vstack(([0] * (self.number_of_objectives - 1) + [1], final))\n self.number_of_vectors = final.shape[0]\n self.values = rotate(final[0], self.ref_point, final)\n self.values_planar = np.copy(self.values)\n self.normalize()\n self.add_edge_vectors()", "def set_shape(self, *, coords: np.ndarray, connectivity) -> None:\n\n self._coords = coords\n self._n_dof, self._n_point = coords.shape\n if self._n_dof == 3:\n if np.max(np.abs(coords[2])) < 1.0e-30:\n self._n_dof = 2\n self._n_dfdof = 3 if self._n_dof == 2 else 6\n if type(connectivity) is np.ndarray:\n connectivity = connectivity.tolist()\n 
self._connectivity = connectivity\n self._n_element = len(connectivity)\n self._material_numbers = np.zeros(self._n_element, dtype=\"int\")\n self._grain_numbers = np.zeros(self._n_element, dtype=\"int\")\n self._crystal_orientation = np.zeros((self._n_element, 3))\n\n self._set_mesh_info()", "def __init__(self, layout, position):\n super(TetrisPiece, self).__init__(attrs_dict={\n 'layout': layout,\n 'position': position,\n 'elements': [\n Element(ElementType.PIECE, position.add(elem_position))\n for elem_position in layout.positions\n ]\n })", "def ReadSeparate(self,connectivity_file,coordinates_file,mesh_type, edges_file = None, faces_file = None,\n delimiter_connectivity=' ',delimiter_coordinates=' ', delimiter_edges=' ', delimiter_faces=' ',\n ignore_cols_connectivity=None,ignore_cols_coordinates=None,ignore_cols_edges=None,\n ignore_cols_faces=None,index_style='c'):\n\n index = 0\n if index_style == 'c':\n index = 1\n\n from time import time; t1=time()\n self.elements = np.loadtxt(connectivity_file,dtype=np.int64,delimiter=delimiter_connectivity) - index\n # self.elements = np.fromfile(connectivity_file,dtype=np.int64,count=-1) - index\n self.points = np.loadtxt(coordinates_file,dtype=np.float64,delimiter=delimiter_coordinates)\n\n\n if ignore_cols_connectivity != None:\n self.elements = self.elements[ignore_cols_connectivity:,:]\n if ignore_cols_coordinates != None:\n self.points = self.points[ignore_cols_coordinates:,:]\n\n if (mesh_type == 'tri' or mesh_type == 'quad') and self.points.shape[1]>2:\n self.points = self.points[:,:2]\n\n self.element_type = mesh_type\n self.nelem = self.elements.shape[0]\n # self.edges = None\n if edges_file is None:\n if mesh_type == \"tri\":\n self.GetBoundaryEdgesTri()\n elif mesh_type == \"tet\":\n self.GetBoundaryEdgesTet()\n else:\n self.edges = np.loadtxt(edges_file,dtype=np.int64,delimiter=delimiter_edges) - index\n if ignore_cols_edges !=None:\n self.edges = self.edges[ignore_cols_edges:,:]\n\n if faces_file is None:\n if mesh_type == \"tet\":\n self.GetBoundaryFacesTet()\n else:\n self.faces = np.loadtxt(faces_file,dtype=np.int64,delimiter=delimiter_edges) - index\n if ignore_cols_faces !=None:\n self.faces = self.faces[ignore_cols_faces:,:]", "def init_grid(self):\n self.pts = np.array(\n np.meshgrid(\n np.arange(self.net_dim[0]) + 1,\n np.arange(self.net_dim[1]) + 1\n )\n ).reshape(2, np.prod(self.net_dim)).T\n if self.topo == \"hexagonal\":\n self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)\n self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]", "def __init__(self):\n self.lattices = []\n self.meshfns = []", "def PlotMeshNumbering(self, figure=None, show_plot=True):\n\n self.__do_essential_memebers_exist__()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n\n if self.element_type == \"tri\":\n\n if figure is None:\n figure = plt.figure()\n plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])\n plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)\n\n for i in range(0,self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"quad\":\n\n if 
figure is None:\n figure = plt.figure()\n point_radius = 3.\n\n C = self.InferPolynomialDegree() - 1\n\n edge_elements = self.GetElementsEdgeNumberingQuad()\n reference_edges = NodeArrangementQuad(C)[0]\n reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)\n reference_edges = np.delete(reference_edges,1,1)\n\n self.GetEdgesQuad()\n x_edges = np.zeros((C+2,self.all_edges.shape[0]))\n y_edges = np.zeros((C+2,self.all_edges.shape[0]))\n\n BasesOneD = np.eye(2,2)\n for iedge in range(self.all_edges.shape[0]):\n ielem = edge_elements[iedge,0]\n edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]\n x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T\n\n\n plt.plot(x_edges,y_edges,'-k')\n\n for i in range(self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n\n import matplotlib as mpl\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n if figure is None:\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))\n view = mlab.view()\n figure.scene.disable_render = True\n\n color = mpl.colors.hex2color('#F88379')\n\n linewidth = 3.\n # trimesh_h = mlab.triangular_mesh(self.points[:,0],\n # self.points[:,1], self.points[:,2], self.faces[:,:3],\n # line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),\n # representation='wireframe') # representation='surface'\n\n # # CHANGE LIGHTING OPTION\n # trimesh_h.actor.property.interpolation = 'phong'\n # trimesh_h.actor.property.specular = 0.1\n # trimesh_h.actor.property.specular_power = 5\n\n # PLOTTING EDGES\n from Florence.PostProcessing import PostProcess\n tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,\n plot_points=True, plot_edges=True, plot_surfaces=False)\n\n x_edges = tmesh.x_edges\n y_edges = tmesh.y_edges\n z_edges = tmesh.z_edges\n connections = tmesh.connections\n\n src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())\n src.mlab_source.dataset.lines = connections\n h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)\n # AVOID WARNINGS\n # lines = mlab.pipeline.stripper(src)\n # h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)\n\n # ELEMENT NUMBERING\n # for i in range(0,self.elements.shape[0]):\n # coord = self.points[self.elements[i,:],:]\n # x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n # y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n # z_avg = np.sum(coord[:,2])/self.elements.shape[1]\n\n # # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)\n # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)\n\n # POINT NUMBERING\n for i in range(self.elements.shape[0]):\n for j in range(self.elements.shape[1]):\n text_obj = mlab.text3d(self.points[self.elements[i,j],0],\n self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),\n color=(0,0,0.),scale=0.05)\n\n\n figure.scene.disable_render = False\n\n if show_plot:\n # mlab.view(*view)\n mlab.show()", "def set_points(self, mode='', 
points=None, range_=RANGE, size=1):\n if mode == 'last':\n if points is None:\n print('Error: empty last point specification given.')\n return\n tol = 0.1\n [i, j, k] = points\n alpha = 2.0 * np.random.rand(1) + 0.5\n beta = 2.0 * np.random.rand(1) + 0.5\n if i >= 0 and j < 0 and k < 0:\n # at one corner of triangle\n assert i < 3\n other = np.delete(np.arange(3), i)\n u = (self.points[i, :] - self.points[other[0], :]\n ) / np.linalg.norm(\n self.points[i, :] - self.points[other[0], :])\n v = (self.points[i, :] - self.points[other[1], :]\n ) / np.linalg.norm(\n self.points[i, :] - self.points[other[1], :])\n self.points[-1, :] = self.points[i, :] + alpha * u + beta * v\n elif i >= 0 and j >= 0 and k < 0:\n found = False\n safety_it = 0\n while not found:\n alpha = np.random.uniform(tol, 1 - tol)\n beta = 1.0 - alpha\n gamma = 2 * np.random.rand(1) + tol\n assert j < 3\n other = np.delete(np.arange(3), (i, j))\n u = (\n self.points[i, :] - self.points[other, :]\n ) # /np.linalg.norm(self.points[i,:] - self.points[other,:])\n v = (\n self.points[j, :] - self.points[other, :]\n ) # /np.linalg.norm(self.points[j,:] - self.points[other,:])\n self.points[-1, :] = (1.0 + gamma) * (\n self.points[other, :] + alpha * u + beta * v)\n #check if new direction lies between u and v.\n new_direction = self.points[-1, :] - self.points[other, :]\n new_direction = new_direction.cp.reshape(\n (-1, )) / np.linalg.norm(new_direction)\n u = u.cp.reshape((-1, )) / np.linalg.norm(u)\n v = v.cp.reshape((-1, )) / np.linalg.norm(v)\n if abs(\n acos(np.dot(new_direction, u)) +\n acos(np.dot(new_direction, v)) -\n acos(np.dot(u, v))) < 1e-10:\n found = True\n safety_it += 1\n if safety_it > 100:\n print('Error: nothing found after 100 iterations.')\n return\n elif i >= 0 and j >= 0 and k >= 0:\n # inside triangle\n assert k < 3\n found = False\n safety_it = 0\n while not found:\n alpha = np.random.rand(1) + tol\n beta = np.random.rand(1) + tol\n other = np.delete(np.arange(3), i)\n u = self.points[other[0], :] - self.points[i, :]\n v = self.points[other[1], :] - self.points[i, :]\n temptative_point = self.points[i, :] + alpha * u + beta * v\n vjk = self.points[other[1], :] - self.points[other[0], :]\n njk = [vjk[1], -vjk[0]]\n if (np.dot(self.points[j, :] - self.points[i, :], njk) >\n 0) != (np.dot(temptative_point - self.points[j, :],\n njk) > 0):\n self.points[-1, :] = temptative_point\n found = True\n safety_it += 1\n if safety_it > 100:\n print('Error: nothing found after 100 iterations.')\n return\n elif i < 0 and j < 0 and k < 0:\n x = range_[0] + (\n range_[1] - range_[0]) * np.random.rand(1)\n y = range_[2] + (\n range_[1] - range_[0]) * np.random.rand(1)\n self.points[-1, :] = [x, y]\n else:\n print(\"Error: non-valid arguments.\")\n elif mode == 'random':\n \"\"\" Create N uniformly distributed points in [0, size] x [0, size]\n \"\"\"\n self.points = np.random.uniform(0, size, (self.N, self.d))\n elif mode == 'normal':\n self.points = np.random.normal(0, size, (self.N, self.d))\n elif mode == 'circle':\n from math import cos, sin\n x_range = size / 2.0\n y_range = size / 2.0\n c = np.array((x_range, y_range))\n r = 0.9 * min(x_range, y_range)\n theta = 2 * pi / self.N\n for i in range(self.N):\n theta_tot = i * theta\n self.points[i, :] = c + np.array(\n (r * cos(theta_tot), r * sin(theta_tot)))\n elif mode == 'set':\n \"\"\"\n Place points according to hard coded rule.\n \"\"\"\n if self.N == 3:\n x = [-1.0, 1.0, 0.0]\n y = [-1.0, -1.0, 1.0]\n elif self.N == 4:\n x = [-1.0, 1.0, 0.0, 0.0]\n y = [-1.0, -1.0, 
1.0, 0.0]\n elif self.N == 5:\n x = [-0.0, 1.5, 1.5, -0.0, -1.0]\n y = [-1.0, -1.0, 1.0, 1.0, 0.0]\n else:\n print(\"Error: No rule defined for N = \", self.N)\n return\n self.points = np.c_[x, y]\n elif mode == 'geogebra':\n if self.N == 4:\n self.points = np.array(((1.5, 1.8), (7.9, 2.5), (2.3, 5.1),\n (3.34, -1.36)))\n elif self.N == 5:\n self.points = np.array(((1.5, 1.8), (7.9, 2.5), (2.3, 5.1),\n (3.34, -1.36), (5, 1.4)))\n else:\n print(\"Error: No rule defined for N = \", self.N)\n elif mode == '':\n if points is None:\n raise NotImplementedError(\"Need to give either mode or points.\")\n else:\n self.points = points\n self.N, self.d = points.shape\n\n self.init()", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def Triangle(self, c1=(0.,0.), c2=(0.,1.), c3=(1.,0.), npoints=10, element_type=\"tri\", equally_spaced=True):\n\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):\n raise ValueError(\"The coordinates c1, c2 and c3 should be given in tuples of two elements each (x,y)\")\n\n npoints = int(npoints)\n\n\n npoints = npoints - 1\n if npoints < 0:\n npoints = 0\n\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)\n opoints = np.vstack((c1,c2,c3))\n oelements = np.array([[0,1,2]])\n\n if element_type==\"tri\":\n mesh = self.TriangularProjection(points=opoints, npoints=npoints, equally_spaced=equally_spaced)\n self.__update__(mesh)\n\n\n elif element_type == \"quad\":\n\n # SPLIT THE TRIANGLE INTO 3 QUADS\n omesh = Mesh()\n omesh.element_type=\"tri\"\n omesh.elements = oelements\n omesh.nelem = omesh.elements.shape[0]\n omesh.points = opoints\n omesh.GetBoundaryEdges()\n\n sys.stdout = open(os.devnull, \"w\")\n omesh.ConvertTrisToQuads()\n sys.stdout = sys.__stdout__\n\n\n npoints = int(npoints/2) + 1\n mesh = self.QuadrilateralProjection(points=omesh.points[omesh.elements[0,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n for i in range(1,omesh.nelem):\n mesh += self.QuadrilateralProjection(points=omesh.points[omesh.elements[i,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n\n self.__update__(mesh)", "def make_coordinate_grid(spatial_size, type):\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n x = 2 * (x / (w - 1)) - 1\n y = 2 * (y / (h - 1)) - 1\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n return meshed", "def buildGrid(self, plot=False):\r\n\r\n print(\"Constructing grid\")\r\n # print(\"Grid dims\", self.ne, self.nn, self.nz)\r\n # print(\"Num points\", 2*(self.ne+1)*(self.nn+1)*3, len(self.coords))\r\n\r\n # number of edges\r\n self.ndx = self.ne + 1\r\n self.ndy = self.nn + 1\r\n self.ndz = self.nz + 1\r\n\r\n # extract the triplets\r\n self.points = {}\r\n self.points[\"e\"] = self.coords[0::3]\r\n self.points[\"n\"] = self.coords[1::3]\r\n self.points[\"z\"] = self.coords[2::3]\r\n\r\n print('points e')\r\n print(self.points[\"e\"])\r\n\r\n # Here are the coordinates\r\n self.X0 = np.reshape(self.points[\"e\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y0 = np.reshape(self.points[\"n\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Z0 = np.reshape(self.points[\"z\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n\r\n self.X1 = np.reshape(self.points[\"e\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y1 = np.reshape(self.points[\"n\"][1::2] , 
(self.ndx,self.ndy), order=\"F\")\r\n self.Z1 = np.reshape(self.points[\"z\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n #\r\n # # visualize\r\n # if plot:\r\n # print(\"plotting\")\r\n # fig = plt.figure()\r\n # ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe(f2m*self.X0, f2m*self.Y0, f2m*self.Z0, rstride=1, cstride=1)\r\n # ax.plot_wireframe(f2m*self.X1, f2m*self.Y1, f2m*self.Z1, rstride=1, cstride=1)\r\n # plt.show()\r", "def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()", "def createVertexData(self, fv_indices_flattened, points):\n mesh_points = points[fv_indices_flattened]\n data_mesh_points = mesh_points.flatten()\n\n return data_mesh_points", "def ChangeType(self):\n\n self.__do_essential_memebers_exist__()\n self.points = np.ascontiguousarray(self.points.astype(np.float64))\n if isinstance(self.elements,np.ndarray):\n self.elements = np.ascontiguousarray(self.elements.astype(np.uint64))\n if hasattr(self, 'edges'):\n if isinstance(self.edges,np.ndarray):\n self.edges = np.ascontiguousarray(self.edges.astype(np.uint64))\n if hasattr(self, 'faces'):\n if isinstance(self.faces,np.ndarray):\n self.faces = np.ascontiguousarray(self.faces.astype(np.uint64))" ]
[ "0.61567265", "0.59727186", "0.5618531", "0.55318904", "0.55072707", "0.5487643", "0.53527415", "0.5328569", "0.5255884", "0.5221636", "0.51695275", "0.5121471", "0.51163435", "0.51055086", "0.5098627", "0.50728333", "0.5067539", "0.50483656", "0.50268656", "0.5023135", "0.49857903", "0.49611372", "0.4961036", "0.49579704", "0.49339977", "0.49115103", "0.49097174", "0.49070203", "0.49051133", "0.4900277" ]
0.6759298
0
Returns a list of sharded queries for the given Cloud Datastore query. This will create up to the desired number of splits, however it may return less splits if the desired number of splits is unavailable. This will happen if the number of split points provided by the underlying Datastore is less than the desired number, which will occur if the number of results for the query is too small. This implementation of the QuerySplitter uses the __scatter__ property to gather random split points for a query.
def get_splits(datastore, query, num_splits, partition=None): # Validate that the number of splits is not out of bounds. if num_splits < 1: raise ValueError('The number of splits must be greater than 0.') if num_splits == 1: return [query] _validate_query(query) splits = [] scatter_keys = _get_scatter_keys(datastore, query, num_splits, partition) last_key = None for next_key in _get_split_key(scatter_keys, num_splits): splits.append(_create_split(last_key, next_key, query)) last_key = next_key splits.append(_create_split(last_key, None, query)) return splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_scatter_query(query, num_splits):\n\n scatter_query = query_pb2.Query()\n for kind in query.kind:\n scatter_kind = scatter_query.kind.add()\n scatter_kind.CopyFrom(kind)\n\n # ascending order\n datastore_helper.add_property_orders(scatter_query, SCATTER_PROPERTY_NAME)\n\n # There is a split containing entities before and after each scatter entity:\n # ||---*------*------*------*------*------*------*---|| * = scatter entity\n # If we represent each split as a region before a scatter entity, there is an\n # extra region following the last scatter point. Thus, we do not need the\n # scatter entity for the last region.\n scatter_query.limit.value = (num_splits - 1) * KEYS_PER_SPLIT\n datastore_helper.add_projection(scatter_query, KEY_PROPERTY_NAME)\n\n return scatter_query", "def _get_scatter_keys(datastore, query, num_splits, partition):\n scatter_point_query = _create_scatter_query(query, num_splits)\n\n key_splits = []\n while True:\n req = datastore_pb2.RunQueryRequest()\n if partition:\n req.partition_id.CopyFrom(partition)\n\n req.query.CopyFrom(scatter_point_query)\n\n resp = datastore.run_query(req)\n for entity_result in resp.batch.entity_results:\n key_splits.append(entity_result.entity.key)\n\n if resp.batch.more_results != query_pb2.QueryResultBatch.NOT_FINISHED:\n break\n\n scatter_point_query.start_cursor = resp.batch.end_cursor\n scatter_point_query.limit.value -= len(resp.batch.entity_results)\n\n key_splits.sort(helper.key_comparator)\n return key_splits", "def SplitQuery(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def chunk_queries(queries: List) -> List[List]:\n chunks: List[List] = []\n # Octopart can only handle 20 queries per request, so split into chunks.\n for chunk in chunked(queries):\n chunks.extend(split_chunk(chunk))\n return chunks", "def support_query_split(datasets, query_size):\n support_datasets = {}\n query_datasets = {}\n for task, dataset in datasets.items():\n support_query_split = dataset.train_test_split(test_size=query_size)\n support_datasets[task] = support_query_split[\"train\"]\n query_datasets[task] = support_query_split[\"test\"]\n return support_datasets, query_datasets", "def _create_split(last_key, next_key, query):\n if not (last_key or next_key):\n return query\n\n split_query = query_pb2.Query()\n split_query.CopyFrom(query)\n composite_filter = split_query.filter.composite_filter\n composite_filter.op = CompositeFilter.AND\n\n if query.HasField('filter'):\n composite_filter.filters.add().CopyFrom(query.filter)\n\n if last_key:\n lower_bound = composite_filter.filters.add()\n lower_bound.property_filter.property.name = KEY_PROPERTY_NAME\n lower_bound.property_filter.op = PropertyFilter.GREATER_THAN_OR_EQUAL\n lower_bound.property_filter.value.key_value.CopyFrom(last_key)\n\n if next_key:\n upper_bound = composite_filter.filters.add()\n upper_bound.property_filter.property.name = KEY_PROPERTY_NAME\n upper_bound.property_filter.op = PropertyFilter.LESS_THAN\n upper_bound.property_filter.value.key_value.CopyFrom(next_key)\n\n return split_query", "def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n 
batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )", "def _choose_split_points(cls, sorted_keys, shard_count):\n assert len(sorted_keys) >= shard_count\n index_stride = len(sorted_keys) / float(shard_count)\n return [sorted_keys[int(round(index_stride * i))]\n for i in range(1, shard_count)]", "def _split_ns_by_scatter(cls,\n shard_count,\n namespace,\n raw_entity_kind,\n app):\n if shard_count == 1:\n\n return [key_range.KeyRange(namespace=namespace, _app=app)]\n\n ds_query = datastore.Query(kind=raw_entity_kind,\n namespace=namespace,\n _app=app,\n keys_only=True)\n ds_query.Order(\"__scatter__\")\n oversampling_factor = 32\n random_keys = ds_query.Get(shard_count * oversampling_factor)\n\n if not random_keys:\n\n\n return ([key_range.KeyRange(namespace=namespace, _app=app)] +\n [None] * (shard_count - 1))\n\n random_keys.sort()\n\n if len(random_keys) >= shard_count:\n\n random_keys = cls._choose_split_points(random_keys, shard_count)\n\n k_ranges = []\n\n k_ranges.append(key_range.KeyRange(\n key_start=None,\n key_end=random_keys[0],\n direction=key_range.KeyRange.ASC,\n include_start=False,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n for i in range(0, len(random_keys) - 1):\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[i],\n key_end=random_keys[i+1],\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[-1],\n key_end=None,\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n if len(k_ranges) < shard_count:\n\n k_ranges += [None] * (shard_count - len(k_ranges))\n return k_ranges", "def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]", "def SplitQuery(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def SplitQuery(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def SplitQuery(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def split_shards(original_list, split_fractions):\n\n assert np.isclose(\n sum(split_fractions), 1.0\n ), f\"Split fractions do not sum to 1: {sum(split_fractions)}\"\n\n original_list = [str(x) for x in sorted(original_list)]\n\n sublists = []\n prev_index = 0\n for weight in split_fractions:\n next_index = prev_index + int(round((len(original_list) * weight), 0))\n sublists.append(original_list[prev_index:next_index])\n prev_index = next_index\n\n assert sum([len(x) for x in sublists]) == len(original_list), \"Split size mismatch\"\n\n if not all(len(x) > 0 for x in sublists):\n logger.warning(\"Unexpected shard distribution encountered - trying to fix this\")\n if len(split_fractions) == 3:\n if len(sublists[0]) > 2:\n 
sublists[0] = original_list[:-2]\n sublists[1] = original_list[-2:-1]\n sublists[2] = original_list[-1:]\n else:\n raise ValueError(\n f\"Not enough shards (#{len(original_list)}) for new distribution\"\n )\n\n elif len(split_fractions) == 2:\n sublists[0] = original_list[:-1]\n sublists[1] = original_list[-1:]\n else:\n raise ValueError\n logger.warning(f\"New shard split: {sublists}\")\n\n if len(sublists) != 3:\n logger.warning(\"No test shards specified\")\n sublists.append(None)\n\n return sublists", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def get_queries(self, query_ids):\n return [\n self.resource_loader.query_cache.get(query_id) for query_id in query_ids\n ]", "def batch_qs(qs: QuerySet, batch_size=1000):\n total = qs.count()\n for start in range(0, total, batch_size):\n end = min(start + batch_size, total)\n yield (qs[start:end], start, end, total)", "def produce_query_batches(self):\n self.__generate_queries()\n return self.__bobs", "def getStars(queries, lcs_fold, query_path=None, progb_txt=\"Querying stars: \"):\n ORDINARY_QUERY_KEY = \"QUERY:\"\n\n stars = []\n for query in tqdm(queries, desc=progb_txt):\n query = query.strip()\n\n if query.startswith(ORDINARY_QUERY_KEY):\n stars += getStarsFromRemoteDb(\n query[len(ORDINARY_QUERY_KEY):], query_path)\n\n else:\n stars += getStarsFromFolder(query, lcs_fold)\n\n if not stars:\n raise QueryInputError(\"There no stars. Your query: %s\" % queries)\n\n return stars", "def test_split_queries(self):\n la_provider = self.la_provider\n\n start = datetime.utcnow() - pd.Timedelta(\"5H\")\n end = datetime.utcnow() + pd.Timedelta(\"5min\")\n delta = pd.Timedelta(\"1H\")\n\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n result_queries = la_provider.all_queries.list_alerts(\n \"print\", start=start, end=end, split_query_by=\"1H\"\n )\n queries = result_queries.split(\"\\n\\n\")\n self.assertEqual(len(queries), 5)\n\n for idx, (st_time, e_time) in enumerate(ranges):\n self.assertIn(st_time.isoformat(sep=\"T\") + \"Z\", queries[idx])\n self.assertIn(e_time.isoformat(sep=\"T\") + \"Z\", queries[idx])\n self.assertIn(start.isoformat(sep=\"T\") + \"Z\", queries[0])\n self.assertIn(end.isoformat(sep=\"T\") + \"Z\", queries[-1])", "def perform_query(self, query: str, n: int = 5):\n query = [query]\n cosine_similarities = self.get_cosine_similarity(query)\n indices = cosine_similarities.argsort()[:-n-1:-1]\n return self.data[\n self.data.index.isin(indices)\n ].reset_index(drop=True).to_dict(orient='records')", "def split(self, num_or_size_splits, shuffle=False):\n raise NotImplementedError", "def Run(self):\n results = []\n count = 1\n log_level = logging.DEBUG - 1\n for bound_query in self.__bound_queries:\n logging.log(log_level, 'Running query #%i' % count)\n results.append(bound_query.Run())\n count += 1\n\n def IterateResults(results):\n \"\"\"Iterator function to return all results in sorted order.\n\n Iterate over the array of results, yielding the next element, in\n sorted order. 
This function is destructive (results will be empty\n when the operation is complete).\n\n Args:\n results: list of result iterators to merge and iterate through\n\n Yields:\n The next result in sorted order.\n \"\"\"\n result_heap = []\n for result in results:\n heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)\n if heap_value.GetEntity():\n heapq.heappush(result_heap, heap_value)\n\n used_keys = set()\n\n while result_heap:\n top_result = heapq.heappop(result_heap)\n\n results_to_push = []\n if top_result.GetEntity().key() not in used_keys:\n yield top_result.GetEntity()\n else:\n pass\n\n used_keys.add(top_result.GetEntity().key())\n\n results_to_push = []\n while result_heap:\n next = heapq.heappop(result_heap)\n if cmp(top_result, next):\n results_to_push.append(next)\n break\n else:\n results_to_push.append(next.GetNext())\n results_to_push.append(top_result.GetNext())\n\n for popped_result in results_to_push:\n if popped_result.GetEntity():\n heapq.heappush(result_heap, popped_result)\n\n return IterateResults(results)", "def produce_query_batches(self):\n pass", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def getKSplits(df, n_splits, seed = None):\n\n result = []\n\n # None random seed is same as not setting it\n df_shuffled = df.sample(len(df), random_state = seed)\n\n fold_size = int(len(df) / n_splits)\n\n for i in range(n_splits):\n if i == n_splits - 1: # last iteration\n df_fold = df_shuffled[fold_size * (i): len(df)] # gets remainder\n else:\n df_fold = df_shuffled[fold_size * (i):fold_size * (i + 1) ] # python starts indexing at 0\n result.append(df_fold)\n\n return result", "def multiQuery(self, query, limit):\n try:\n results = self.sp.search(query, limit)\n resultLists = results['tracks']['items']\n return resultLists\n except spotipy.SpotifyException as se:\n self.authenticate()\n return self.multiQuery(query, limit)", "def ShardList(list_to_shard, total_shards, shard_idx):\n length = len(list_to_shard)\n split_lists = []\n for i in range(total_shards):\n start_idx = i * length // total_shards\n end_idx = (i + 1) * length // total_shards\n split_lists.append(list_to_shard[start_idx: end_idx])\n\n return 
split_lists[shard_idx]", "def split_into_batches_of_size(self, batch_size: int) -> Iterator[List]:\n if batch_size >= len(self):\n yield type(self)(self)\n else:\n for run in range(0, len(self), batch_size):\n yield self[run:run + batch_size]" ]
[ "0.67025167", "0.61562634", "0.5903706", "0.58714837", "0.5599633", "0.55345684", "0.5488992", "0.54546636", "0.5414201", "0.53706396", "0.5295972", "0.5295972", "0.52716196", "0.5193675", "0.51312673", "0.50698984", "0.50670683", "0.50543606", "0.504738", "0.501817", "0.50089234", "0.5007577", "0.4952316", "0.49514526", "0.4933965", "0.49306452", "0.4878465", "0.48720595", "0.48711", "0.48708037" ]
0.7298268
0
Verifies that the given query can be properly scattered.
def _validate_query(query):
  if len(query.kind) != 1:
    raise ValueError('Query must have exactly one kind.')

  if query.order:
    raise ValueError('Query cannot have any sort orders.')

  if query.HasField('limit'):
    raise ValueError('Query cannot have a limit set.')

  if query.offset > 0:
    raise ValueError('Query cannot have an offset set.')

  _validate_filter(query.filter)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_data_validity(X, y, query, task):\n # ADD IMPLEMENTATION HERE", "def _verify_query_segregation(query, auth_project=None):\r\n auth_project = (auth_project or\r\n acl.get_limited_to_project(pecan.request.headers))\r\n\r\n if not auth_project:\r\n return\r\n\r\n for q in query:\r\n if q.field in ('project', 'project_id') and auth_project != q.value:\r\n raise ProjectNotAuthorized(q.value)", "def is_valid_query(query):\n\n if query is None:\n return False\n\n if len(query) == 0:\n return False\n\n return True", "def is_valid(query: dict, secret: str, user_id: str) -> bool:\n if not query.get(\"sign\"):\n print(\"no sign\")\n return False\n\n if user_id != query.get(\"vk_user_id\")[0]:\n print(\"mismatch user_id\")\n return False\n \n vk_subset = sorted(\n filter(\n lambda key: key.startswith(\"vk_\"), \n query\n )\n )\n\n if not vk_subset:\n print(\"no subset\")\n return False\n\n ordered = {k: query[k] for k in vk_subset}\n\n hash_code = b64encode(\n HMAC(\n secret.encode(), \n parse.urlencode(ordered, doseq=True).encode(), \n sha256\n ).digest()\n ).decode(\"utf-8\")\n\n if hash_code[-1] == \"=\":\n hash_code = hash_code[:-1]\n\n fixed_hash = hash_code.replace('+', '-').replace('/', '_')\n print(query.get(\"sign\"))\n print(fixed_hash)\n return query.get(\"sign\")[0] == fixed_hash", "def assert_goodness(self):\n if self._setted:\n self.assert_stored_iss()\n self.assert_stored_ks()\n ## Check idxs\n self.assert_stored_idxs()\n ## Check sp_relative_pos\n self.assert_stored_sp_rel_pos()", "def test_disallowed_queries():\n strings = [\"select * from test times 10\",\n \"select * from test save clusters with threshold .5 as test.csv\",\n \"select * from test given a=5\",\n \"select * from test with confidence .4\",\n \"select a conf .4 from test\",\n \"select a conf .4, b from test\",\n \"simulate a conf .4 from test times 10\",\n \"simulate a conf .4, b from test times 10\",\n \"infer * from test times 10\",\n \"infer typicality from test\",\n \"infer * from test with confidence 1.5\",\n \"simulate typicality from test\",\n \"infer * from test save clusters with threshold .5 as test.csv\",\n \"infer * from test given a=5\",\n \"simulate * from test where a < 4\",\n \"simulate * from test save clusters with threshold .5 as test.csv\",\n \"simulate * from test with confidence .4\",\n \"simulate * from test with 4 samples\",\n \"simulate * from test\",\n \"estimate columns from test with confidence .4\",\n \"estimate columns from test given a=4\",\n \"estimate columns from test times 10\",\n \"summarize estimate columns from test\",\n \"plot estimate columns from test\",\n \"estimate columns from test save clusters with threshold .5 as test.csv\",\n \"estimate pairwise correlation from test where a = b\",\n \"estimate pairwise correlation from test times 10\",\n \"estimate pairwise correlation from test given a = 5\",\n \"estimate pairwise correlation from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\",\n \"estimate pairwise row similarity from test times 10\",\n \"estimate pairwise row similarity from test given a = 5\",\n \"estimate pairwise row similarity from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\"\n ]\n\n for query_string in strings:\n ast = bql_statement.parseString(query_string,parseAll=True)\n with pytest.raises(AssertionError):\n parser.parse_single_statement(ast)", "def test_overlap(query, reference):\n return (reference[0] <= query[0] <= reference[1] or\n reference[0] <= query[1] <= 
reference[1] or\n query[0] <= reference[0] <= reference[1] <= query[1])", "def is_valid(*, query: dict, secret: str) -> bool:\n vk_subset = OrderedDict(\n sorted(x for x in query.items() if x[0][:3] == \"vk_\"))\n hash_code = b64encode(HMAC(secret.encode(), urlencode(\n vk_subset, doseq=True).encode(), sha256).digest())\n decoded_hash_code = hash_code.decode(\n 'utf-8')[:-1].replace('+', '-').replace('/', '_')\n return query[\"sign\"] == decoded_hash_code", "def _check_queryable(self):\n if not self._bucket:\n raise Exception('Bucket has not been selected')", "def _can_handle_query(cls, *query):\n chkattr = [\"Time\", \"Instrument\", \"SatelliteNumber\"]\n chklist = [x.__class__.__name__ in chkattr for x in query]\n for x in query:\n if x.__class__.__name__ == \"Instrument\" and x.value.lower() in (\n \"xrs\",\n \"goes\",\n ):\n return all(chklist)\n return False", "def prove(self, query, context):\n return self.askOne(query, context) != None", "def is_valid_query(query: Dict[str, Any]) -> bool:\n for name, value in query.items():\n if is_illegal_surrogate(name) or is_illegal_surrogate(value):\n return False\n return True", "def validate(self, query):\n request = Request(method=\"post\", endpoint=\"/query\", data={\"query\": query})\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryValidateError(resp, request)\n body = resp.body\n body.pop(\"code\", None)\n body.pop(\"error\", None)\n if \"bindVars\" in body:\n body[\"bind_vars\"] = body.pop(\"bindVars\")\n return body\n\n return self._execute(request, response_handler)", "def _validate_query_parameters(self):\n check_years(self._years)\n check_geo_hierarchy(self.for_geo, self.in_geo)\n check_geo_estimates(self.estimate, self.for_geo)\n return True", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1", "def check_consistency(self, es):", "def test_sqpp_distributed_ands_equivalent(self):\n self.assertEqual(sorted(perform_request_search(p='ellis and (kaluza-klein or r-parity)')),\n sorted(perform_request_search(p='ellis and (r-parity or kaluza-klein)')))", "def qualifies(node, queries):\n # assert self.isleaf(node)\n for axis, (i, j) in enumerate(queries):\n if not (i <= node.point[axis] < j):\n return False\n return True", "def assert_mapping_consistency(layout):\n values = sorted(layout.values())\n keys = list(layout)\n ref_keys = [\"q\" + str(i) for i in range(len(keys))]\n if keys != ref_keys:\n raise PlacementError(\"Some physical qubits in the layout may be missing or duplicated.\")\n if values != list(range(len(values))):\n raise PlacementError(\"Some logical qubits in the layout may be missing or duplicated.\")", "def checkSat(self, query):\n errMsg = \"Method has not yet been implemented.\"\n raise NotImplementedError(errMsg)", "def isValidStatsQuery(self, statsQuery):\n if len(statsQuery) != 2 and len(statsQuery) != 3:\n return False\n if len(statsQuery) == 2:\n validStatsQueries = [[\"active\", \"clients\"], [\"now\", \"rented\"], [\"late\", \"rentals\"]]\n isValidQuery = False\n for query in validStatsQueries:\n if query[0] == statsQuery[0] and query[1] == statsQuery[1]:\n isValidQuery = True\n elif len(statsQuery) == 3:\n validStatsQueries = [[\"most\", \"rented\", \"days\"], [\"most\", \"rented\", \"times\"]]\n isValidQuery = False\n for query in validStatsQueries:\n if query[0] == statsQuery[0] and query[1] == statsQuery[1] and query[2] == statsQuery[2]:\n isValidQuery = True\n return isValidQuery", "def voxelConsistency(cleaned_dataframe, column_number, 
expected_size):\n consistency_boolean = True\n for row in cleaned_dataframe.index:\n if cleaned_dataframe[column_number][row] == expected_size:\n continue\n elif cleaned_dataframe[column_number][row] != expected_size:\n print(\"Subject scan \" + cleaned_dataframe[0][row] + \" does not have voxel size of \" +str(expected_size))\n consistency_boolean = False\n return consistency_boolean", "def can_access_all_queries(self) -> bool:\n\n return self.can_access(\"all_query_access\", \"all_query_access\")", "def test_allow_filtering(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t \"\n \"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {v}\".format(v=i), [i, i, 'a', 3.0])\n\n rows = list(session.execute(\"SELECT * FROM t_by_v2 WHERE v2 = 'a'\"))\n assert len(rows) == 1000, \"Expected 1000 rows but got {}\".format(len(rows))\n\n assert_invalid(session, \"SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'\")\n assert_invalid(session, \"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1\")\n\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING\".format(i),\n [i, i, 'a', 3.0]\n )\n assert_one(\n session,\n \"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING\".format(i),\n ['a', i, i, 3.0]\n )", "def is_valid_query(query):\n try:\n etree.ETXPath(query)\n return True\n except etree.XPathSyntaxError:\n return False", "def _validate_query(query, db_func, internal_keys=None,\r\n allow_timestamps=True):\r\n\r\n internal_keys = internal_keys or []\r\n _verify_query_segregation(query)\r\n\r\n valid_keys = inspect.getargspec(db_func)[0]\r\n internal_keys.append('self')\r\n valid_keys = set(valid_keys) - set(internal_keys)\r\n translation = {'user_id': 'user',\r\n 'project_id': 'project',\r\n 'resource_id': 'resource'}\r\n\r\n has_timestamp_query = _validate_timestamp_fields(query,\r\n 'timestamp',\r\n ('lt', 'le', 'gt', 'ge'),\r\n allow_timestamps)\r\n has_search_offset_query = _validate_timestamp_fields(query,\r\n 'search_offset',\r\n 'eq',\r\n allow_timestamps)\r\n\r\n if has_search_offset_query and not has_timestamp_query:\r\n raise wsme.exc.InvalidInput('field', 'search_offset',\r\n \"search_offset cannot be used without \" +\r\n \"timestamp\")\r\n\r\n def _is_field_metadata(field):\r\n return (field.startswith('metadata.') or\r\n field.startswith('resource_metadata.'))\r\n\r\n for i in query:\r\n if i.field not in ('timestamp', 'search_offset'):\r\n key = translation.get(i.field, i.field)\r\n operator = i.op\r\n if key in valid_keys or _is_field_metadata(i.field):\r\n if operator == 'eq':\r\n if key == 'enabled':\r\n i._get_value_as_type('boolean')\r\n elif _is_field_metadata(key):\r\n i._get_value_as_type()\r\n else:\r\n raise wsme.exc.InvalidInput('op', i.op,\r\n 'unimplemented operator for '\r\n '%s' % i.field)\r\n else:\r\n msg = (\"unrecognized field in query: %s, \"\r\n \"valid keys: %s\") % (query, sorted(valid_keys))\r\n raise wsme.exc.UnknownArgument(key, msg)", "def check_containment(\n row,\n query_index,\n reference_index,\n 
percent_identity=PERCENT_IDENTITY,\n covered_length=COVERED_LENGTH,\n):\n if (row[\"qseqid\"] != row[\"sseqid\"]) and (row[\"pident\"] >= percent_identity):\n query_covered = row[\"length\"] / float(query_index.loc[row[\"qseqid\"], \"LENGTH\"])\n reference_covered = row[\"length\"] / float(\n reference_index.loc[row[\"sseqid\"], \"LENGTH\"]\n )\n if query_covered >= covered_length or reference_covered >= covered_length:\n return True\n else:\n return False\n else:\n return False", "def compare_with_old_data_query(self):\n raise NotImplementedError", "def test_queries_verify_validities_and_engagements_query_string(\n query_valid_from: str,\n query_valid_to: str,\n query_started_engagements: str,\n query_ended_engagements: str,\n) -> None:\n assert query_valid_from == gql_query_validity_field(validity_from=True)\n assert query_valid_to == gql_query_validity_field(validity_to=True)\n\n # The call returns None, when no from or to is provided.\n assert gql_query_validity_field() is None\n\n assert query_started_engagements == gql_query_persons_details_to_display(\n started_engagement=True\n )\n assert query_ended_engagements == gql_query_persons_details_to_display(\n ended_engagement=True\n )\n\n # The call returns None, when no started or ended is provided.\n assert gql_query_persons_details_to_display() is None", "def _can_handle_query(cls, *query):\n # Import here to prevent circular imports\n from sunpy.net import attrs as a\n\n required = {a.Time, a.Instrument}\n optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}\n all_attrs = {type(x) for x in query}\n\n ops = all_attrs - required\n # check to ensure that all optional requirements are in approved list\n if ops and not all(elem in optional for elem in ops):\n return False\n\n # if we get this far we have either Instrument and Time\n # or Instrument, Time and Wavelength\n check_var_count = 0\n for x in query:\n if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':\n check_var_count += 1\n\n if check_var_count == 1:\n return True\n else:\n return False" ]
[ "0.6401588", "0.6210684", "0.58407664", "0.57362634", "0.5674811", "0.5641738", "0.56243056", "0.56197655", "0.56001127", "0.5595915", "0.55926687", "0.5555283", "0.5554201", "0.55246806", "0.54903454", "0.5469015", "0.5455433", "0.54320014", "0.5399444", "0.5393129", "0.5352158", "0.53394794", "0.5291742", "0.5286834", "0.5274041", "0.52452135", "0.52322876", "0.5226827", "0.5221606", "0.52024615" ]
0.6341513
1
Validates that we only have allowable filters. Note that equality and ancestor filters are allowed; however, they may result in inefficient sharding.
def _validate_filter(filter):
  if filter.HasField('composite_filter'):
    for sub_filter in filter.composite_filter.filters:
      _validate_filter(sub_filter)
  elif filter.HasField('property_filter'):
    if filter.property_filter.op in UNSUPPORTED_OPERATORS:
      raise ValueError('Query cannot have any inequality filters.')
  else:
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_filters(self, level):\n if(self.filters == Filters.NoFilter):\n return True\n else:\n return (self.filters & level.filters == 0)", "def is_valid_model_filters(model, filters):\n for key in filters.keys():\n if not hasattr(model, key):\n return False\n return True", "def is_valid_model_filters(model, filters):\n for key in filters.keys():\n if not hasattr(model, key):\n return False\n return True", "def _CheckFilter(self, filter, values):\n try:\n match = Query.FILTER_REGEX.match(filter)\n if not match:\n raise datastore_errors.BadFilterError(\n 'Could not parse filter string: %s' % str(filter))\n except TypeError:\n raise datastore_errors.BadFilterError(\n 'Could not parse filter string: %s' % str(filter))\n\n property = match.group(1)\n operator = match.group(3)\n if operator is None:\n operator = '='\n\n if isinstance(values, tuple):\n values = list(values)\n elif not isinstance(values, list):\n values = [values]\n if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):\n raise datastore_errors.BadValueError(\n 'Filtering on %s properties is not supported.' % typename(values[0]))\n\n if operator in self.INEQUALITY_OPERATORS:\n if self.__inequality_prop and property != self.__inequality_prop:\n raise datastore_errors.BadFilterError(\n 'Only one property per query may have inequality filters (%s).' %\n ', '.join(self.INEQUALITY_OPERATORS))\n elif len(self.__orderings) >= 1 and self.__orderings[0][0] != property:\n raise datastore_errors.BadFilterError(\n 'Inequality operators (%s) must be on the same property as the '\n 'first sort order, if any sort orders are supplied' %\n ', '.join(self.INEQUALITY_OPERATORS))\n\n if (self.__kind is None and\n property != datastore_types._KEY_SPECIAL_PROPERTY):\n raise datastore_errors.BadFilterError(\n 'Only %s filters are allowed on kindless queries.' 
%\n datastore_types._KEY_SPECIAL_PROPERTY)\n\n if property in datastore_types._SPECIAL_PROPERTIES:\n if property == datastore_types._KEY_SPECIAL_PROPERTY:\n for value in values:\n if not isinstance(value, Key):\n raise datastore_errors.BadFilterError(\n '%s filter value must be a Key; received %s (a %s)' %\n (datastore_types._KEY_SPECIAL_PROPERTY, value, typename(value)))\n\n return match", "def _has_filters(self):\n return self.query.has_filters()", "def has_filter(self) -> bool:\n return self.filter_client_reference_id or self.filter_mhr_number or self.filter_registration_type or \\\n self.filter_reg_start_date or self.filter_status_type or self.filter_submitting_name or \\\n self.filter_username", "def test_optional_filter_params(self):\n del self.internal_filter['max']\n del self.external_filter['max']\n\n # Serialize\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)\n\n # Deserialize\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def check_filters(self):\n try:\n if(self.fakes_model.check_code_in_model(self.code)):\n self.filters |= Filters.Fake\n except AttributeError: # No fakes model has been set.\n pass\n\n self.check_potentially_fake()\n self.check_tags()", "def is_valid_model_filters(model, filters, exclude_list=None):\n for key in filters.keys():\n if exclude_list and key in exclude_list:\n continue\n if key == 'metadata':\n if not isinstance(filters[key], dict):\n LOG.debug(\"Metadata filter value is not valid dictionary\")\n return False\n continue\n try:\n key = key.rstrip('~')\n getattr(model, key)\n except AttributeError:\n LOG.debug(\"'%s' filter key is not valid.\", key)\n return False\n return True", "def check_filter(self, filter):\n if filter is None:\n return True\n if not _valid_filter(filter):\n raise ValueError(filter)\n elif not self._filter_supported(filter):\n msg = \"{} not indexed for filter: '{}'.\"\n raise RuntimeError(msg.format(type(self).__name__, filter))", "def get_request_filters(self):\n # build the compiled set of all filters\n requested_filters = OrderedDict()\n for filter_name, f in self.filters.items():\n requested_filters[filter_name] = f\n\n # exclusion params\n exclude_name = '%s!' 
% filter_name\n if related(self, exclude_name) in self.data:\n # deepcopy the *base* filter to prevent copying of model & parent\n f_copy = copy.deepcopy(self.base_filters[filter_name])\n f_copy.parent = f.parent\n f_copy.model = f.model\n f_copy.exclude = not f.exclude\n\n requested_filters[exclude_name] = f_copy\n\n return requested_filters", "def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True", "def condition_filters(self):\r\n return filters.Filters(self)", "def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def filter(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n from jetengine.query_builder.transform import validate_fields\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QNot, QCombination)):\n if self._filters:\n self._filters = self._filters & arguments[0]\n else:\n self._filters = arguments[0]\n else:\n validate_fields(self.__klass__, kwargs)\n if self._filters:\n self._filters = self._filters & Q(**kwargs)\n else:\n if arguments and len(arguments) == 1 and isinstance(arguments[0], dict):\n self._filters = Q(arguments[0])\n else:\n self._filters = Q(**kwargs)\n\n return self", "def validate_remove(cls, filters: dict) -> dict:\n return cls.validate_query(filters)", "def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n 
else:\n return True", "def query_filters_restricted (self) :\n user = self.user_restriction\n if user is not None :\n return Q.created_by == user", "def _validate_submission_type(filters: dict) -> None:\n legacy_submission_type = filters.get(\"submission_type\", ...)\n submission_types = filters.get(\"submission_types\", ...)\n\n if submission_types == ... and legacy_submission_type == ...:\n raise InvalidParameterException(\"Missing required filter: submission_types\")\n\n elif submission_types == ... and legacy_submission_type != ...:\n del filters[\"submission_type\"]\n if isinstance(legacy_submission_type, list):\n raise InvalidParameterException(\"Use filter `submission_types` to request multiple submission types\")\n else:\n submission_types = [legacy_submission_type]\n else:\n if not isinstance(submission_types, list):\n submission_types = [submission_types]\n\n if len(submission_types) == 0:\n msg = f\"Provide at least one value in submission_types: {' '.join(VALID_ACCOUNT_SUBMISSION_TYPES)}\"\n raise InvalidParameterException(msg)\n\n if any(True for submission_type in submission_types if submission_type not in VALID_ACCOUNT_SUBMISSION_TYPES):\n msg = f\"Invalid value in submission_types. Options: [{', '.join(VALID_ACCOUNT_SUBMISSION_TYPES)}]\"\n raise InvalidParameterException(msg)\n\n filters[\"submission_types\"] = list(set(submission_types))", "def is_to_filter(self):\n if not self.app.args.filter is None:\n # Check the flag value to evite problem in search process\n ok = self.validate_value_flag()\n\n if ok is False:\n fatal([\n 'Invalid value for \"value\" flag',\n 'The value flag is required to filter',\n 'Use instead:',\n '$ tasks-app show --filter/-f={} --value/-v=VALUE'.format(self.app.args.filter),\n ])\n else:\n return True\n else:\n return False", "def get_input_filters(self):\n\n collections = self.list(in_string=False)\n this_collection = None\n\n for collection in collections:\n if collection[\"name\"] == self._collection:\n this_collection = collection\n break\n\n if not this_collection:\n return False\n\n if \"input_filters\" not in collection:\n return False\n\n for filter in collection[\"input_filters\"]:\n if not utils.build_subnet(filter[0]) or filter[1] not in [constants.IP_SRC, constants.IP_DST, constants.IP_EITHER]:\n return False\n\n return collection[\"input_filters\"]", "def filters_active(self):\n if self.is_valid():\n return bool(\n {\n k: v\n for k, v in self.cleaned_data.items()\n if k not in [\"q\", \"sort\"] and bool(v)\n }\n )\n return False", "def filter_form_valid(self, filter_form):\n return True", "def should_filter(self):\n if not getattr(self.view, 'filter_backends', None):\n return False\n\n if self.method.lower() not in [\"get\", \"delete\"]:\n return False\n\n if not isinstance(self.view, GenericViewSet):\n return True\n\n return is_list_view(self.path, self.method, self.view)", "def _apply_filters_incompatible(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return True\n if \"features\" in self.filters:\n if not \"features\" in metadata:\n return True\n if \"authors\" in self.filters:\n if not \"authors\" in metadata:\n return True\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return True\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return True\n return False", "def validate_query(cls, filters: dict) -> dict:\n queried_fields = [\n field.name for field in cls.__fields__ if field.name in 
filters\n ]\n unknown_fields = [\n field_name for field_name in filters if field_name not in queried_fields\n ]\n known_filters = copy.deepcopy(filters)\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n unknown_field, known_filters[unknown_field]\n )\n if known_field:\n known_filters.setdefault(known_field.name, {}).update(field_value)\n\n errors = {}\n\n for field in [field for field in cls.__fields__ if field.name in known_filters]:\n errors.update(field.validate_query(known_filters))\n\n return errors", "def isSubset(self, filter1, filter2):\n if not filter2[0] and not filter2[1]:\n return True\n elif not filter2[0] and filter2[1]:\n raise ValueError(\"Facility ALL but User not ALL\")\n elif filter2[0]:\n if filter1[0] == filter2[0]:\n if not filter2[1]:\n return True\n elif filter2[1] and filter2[1] == filter1[1]:\n return True\n return False", "def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))", "def filter(self, filters):" ]
[ "0.6807991", "0.6739831", "0.6739831", "0.6700357", "0.65870214", "0.63247246", "0.6261503", "0.6168656", "0.61411965", "0.6037914", "0.5961685", "0.59602743", "0.59555244", "0.5937219", "0.59234697", "0.5908777", "0.5897933", "0.5850972", "0.58172536", "0.5808177", "0.57586205", "0.5741931", "0.5728953", "0.57124597", "0.57058805", "0.57037777", "0.56752545", "0.5665832", "0.56640154", "0.56533486" ]
0.79034907
0
Creates a scatter query from the given user query.
def _create_scatter_query(query, num_splits): scatter_query = query_pb2.Query() for kind in query.kind: scatter_kind = scatter_query.kind.add() scatter_kind.CopyFrom(kind) # ascending order datastore_helper.add_property_orders(scatter_query, SCATTER_PROPERTY_NAME) # There is a split containing entities before and after each scatter entity: # ||---*------*------*------*------*------*------*---|| * = scatter entity # If we represent each split as a region before a scatter entity, there is an # extra region following the last scatter point. Thus, we do not need the # scatter entity for the last region. scatter_query.limit.value = (num_splits - 1) * KEYS_PER_SPLIT datastore_helper.add_projection(scatter_query, KEY_PROPERTY_NAME) return scatter_query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_query(self, query):\n query = pylastica.query.Query.create(query)\n data = query.to_dict()\n return self.set_param('query', data['query'])", "def _make_query(self):\r\n raise NotImplementedError()", "def make_query(self, query, data: Dict):\n return query.format_map(data)", "def _gen_cat_query(self,query_fields=None):\n if query_fields is None:\n object_id_fields = ['decals_id','brick_primary','brickid','ra','dec','gaia_pointsource']\n mag_fields = ['mag_g','mag_r','mag_z','mag_w1','mag_w2','mag_w3','mag_w4']\n snr_fields = ['snr_g','snr_r','snr_z','snr_w1','snr_w2','snr_w3','snr_w4']\n query_fields = object_id_fields+mag_fields+snr_fields\n \n database = \"ls_dr7.tractor\"\n self.query = dlsurvey._default_query_str(query_fields, database, self.coord, self.radius)", "def make_query(self,user_id=None, tenant_id=None, resource_id=None,\n user_ids=None, tenant_ids=None, resource_ids=None):\n user_ids = user_ids or []\n tenant_ids = tenant_ids or []\n resource_ids = resource_ids or []\n\n query = []\n if user_id:\n user_ids = [user_id]\n for u_id in user_ids:\n query.append({\"field\": \"user_id\", \"op\": \"eq\", \"value\": u_id})\n\n if tenant_id:\n tenant_ids = [tenant_id]\n for t_id in tenant_ids:\n query.append({\"field\": \"project_id\", \"op\": \"eq\", \"value\": t_id})\n\n if resource_id:\n resource_ids = [resource_id]\n for r_id in resource_ids:\n query.append({\"field\": \"resource_id\", \"op\": \"eq\", \"value\": r_id})\n\n return query", "def showGqlQuery(query):\n proto = query._proto_query\n kind = query._model_class.kind()\n filters = proto.filters()\n boundfilters = proto._GQL__bound_filters\n orderings = proto.orderings()\n hint = proto.hint()\n limit = proto.limit()\n offset = proto._GQL__offset\n\n select = \"SELECT * FROM %s\" % kind\n where = []\n order = []\n\n for k in sorted(filters):\n for clause in filters[k]:\n name, op = clause\n if name==-1: name = 'ANCESTOR'\n where.append(\"%s %s :%s\" % (name, op.upper(), k))\n\n for k in sorted(boundfilters):\n if isinstance(k, tuple):\n op = ' '.join(k)\n else:\n op = k\n where.append(\"%s %r\" % (op, boundfilters[k]))\n\n for p, o in orderings:\n order.append(\"%s %s\" % (p, 'DESC' if o==datastore.Query.DESCENDING else 'ASC'))\n\n gql = select\n if where:\n gql += ' WHERE '+' AND '.join(where)\n if order:\n gql += ' ORDER BY ' + ', '.join(order)\n if limit != -1:\n if offset != -1:\n gql += ' LIMIT %s,%s' % (offset,limit)\n else:\n gql += ' LIMIT %s' % limit\n elif offset != -1:\n gql += ' OFFSET %s' % offset\n return gql", "def make_simple_gaia_query(WHERE=None, ORDERBY=None, user_cols=None,\n FROM=None, all_columns=False,\n panstarrs1=False, gaia_mags=False, user_ASdict=None,\n defaults=None,\n pprint=False):\n\n return make_gaia_query(WHERE=WHERE, ORDERBY=ORDERBY, user_cols=user_cols,\n FROM=FROM, use_AS=True, all_columns=all_columns,\n panstarrs1=panstarrs1, gaia_mags=gaia_mags,\n user_ASdict=user_ASdict, inmostquery=True,\n defaults=defaults, pprint=pprint)", "def _create_query_string(self, query):\n # Check for a result type, if none found, set it to default.\n result_type = query.result_type\n if not result_type:\n result_type = self.default_result_type\n\n # Check to if the result type is valid\n if result_type not in RESULT_TYPES:\n raise QueryParamException(self.name, \"Engine doesn't support query result type '{0}'\"\n .format(query.result_type))\n\n search_params = {'result_type': result_type,\n 'q': query.terms}\n\n query_append = \"search?q={}&type={}&access_token={}\".format\\\n (search_params['q'], 
search_params['result_type'], self.api_key)\n\n return API_ENDPOINT + encode_symbols(query_append)", "def CustomRetrievalQuery(self, query):\n\n\t\tpoints_dat = self.session.execute(query)\n\n\t\tpoints = []\n\n\t\tfor point_dat in points_dat:\n\t\t\tpoints.append(Point(\n\t\t\t\tDeviceID = point_dat.deviceid,\n\t\t\t\tLatitude = point_dat.latitude,\n\t\t\t\tLongitude = point_dat.longitude,\n\t\t\t\tDateTime = point_dat.datetime))\n\t\treturn points", "def _get_query(query_name):\n query_dict = {\n 'survey_structure': f\"\"\"SELECT * FROM [Survey_Sample_A19].[dbo].[SurveyStructure]\"\"\",\n 'surveys_query': f\"\"\"SELECT SurveyId FROM [Survey_Sample_A19].[dbo].[Survey]\"\"\",\n 'questions_query': f\"\"\"SELECT * FROM (\n SELECT SurveyId, QuestionId, 1 as InSurvey\n FROM SurveyStructure\n WHERE SurveyId = @currentSurveyId \n UNION \n SELECT @currentSurveyId as SurveyId, Q.QuestionId, 0 as InSurvey \n FROM Question as Q \n WHERE NOT EXISTS (\n SELECT *\n FROM SurveyStructure as S\n WHERE S.SurveyId = @currentSurveyId \n AND S.QuestionId = Q.QuestionId \n ) \n ) as t \n ORDER BY QuestionId;\"\"\",\n 'query_template_for_answer_column': f\"\"\" COALESCE((\n SELECT a.Answer_Value \n FROM Answer as a \n WHERE a.UserId = u.UserId\n AND a.SurveyId = <SURVEY_ID> \n AND a.QuestionId = <QUESTION_ID> \n ), -1) AS ANS_Q<QUESTION_ID> \"\"\",\n 'query_template_for_null_column': f\"\"\" NULL AS ANS_Q<QUESTION_ID> \"\"\",\n 'query_template_outer_union_query': f\"\"\" SELECT UserId , <SURVEY_ID> as SurveyId, <DYNAMIC_QUESTION_ANSWERS> \n FROM [User] as u \n WHERE EXISTS (\n SELECT * \n FROM Answer as a \n WHERE u.UserId = a.UserId \n AND a.SurveyId = <SURVEY_ID>\n )\"\"\",\n 'vw_survey_data': f\"\"\"SELECT * FROM [Survey_Sample_A19].[dbo].[vw_AllSurveyData]\"\"\",\n 'edit_view': f\"\"\"CREATE OR ALTER VIEW vw_AllSurveyData AS \"\"\"\n\n }\n return query_dict.get(query_name)", "def make_query(self):", "def query(self, query, authorization_required=True):\n url = 'https://{}/api/v1/graphql'.format(self.host)\n headers = {\n 'Content-Type': 'application/json',\n }\n json = {\n 'query': query,\n }\n # Login if not yet done\n if authorization_required:\n if not self.authorized:\n self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n\n request = self.session.post(\n url, headers=headers, json=json,\n verify=self.verify)\n return request", "def _fetch_sample_data_from_user_query(self) -> TableData:\n rnd = self.session.execute(f\"{self._profile_sample_query}\")\n try:\n columns = [col.name for col in rnd.cursor.description]\n except AttributeError:\n columns = list(rnd.keys())\n return TableData(\n columns=columns,\n rows=[list(row) for row in rnd.fetchmany(100)],\n )", "def query2df(query):\n df = pd.DataFrame(data = list(itertools.product([0, 1], repeat=len(query.variables))), columns=query.variables)\n df['p'] = query.values.flatten()\n return df", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def generate_query(self):\n return", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def _Dynamic_RunQuery(self, query, query_result, request_id=None):\n if query.has_transaction():\n if not query.has_ancestor():\n raise apiproxy_errors.ApplicationError(\n datastore_pb.Error.BAD_REQUEST,\n 'Only ancestor queries are allowed inside transactions.')\n (filters, orders) = datastore_index.Normalize(query.filter_list(),\n query.order_list(), [])\n \n 
old_datastore_stub_util.FillUsersInQuery(filters)\n\n if not query.has_app():\n query.set_app(self.project_id)\n self.__ValidateAppId(query.app())\n\n self._RemoteSend(query, query_result, \"RunQuery\", request_id)\n results = query_result.result_list()\n for result in results:\n old_datastore_stub_util.PrepareSpecialPropertiesForLoad(result)\n\n last_cursor = None\n if query_result.has_compiled_cursor():\n last_cursor = query_result.compiled_cursor()\n\n if query_result.more_results():\n new_cursor = InternalCursor(query, last_cursor, len(results))\n cursor_id = self.__getCursorID()\n cursor = query_result.mutable_cursor()\n cursor.set_app(self.project_id)\n cursor.set_cursor(cursor_id)\n self.__queries[cursor_id] = new_cursor\n\n if query.compile():\n compiled_query = query_result.mutable_compiled_query()\n compiled_query.set_keys_only(query.keys_only())\n compiled_query.mutable_primaryscan().set_index_name(query.Encode())", "def query(self, query):", "def create_dataset(self, *args, **kwargs):\n dataset = super().create_dataset(*args, **kwargs)\n length = len(dataset._data)\n dataset.append_col([self.request.user.id] * length,\n header=\"source_user\")\n return dataset", "def create_dataset(self, *args, **kwargs):\n dataset = super().create_dataset(*args, **kwargs)\n length = len(dataset._data)\n dataset.append_col([self.request.user.id] * length,\n header=\"source_user\")\n return dataset", "def _make_query_SELECT(user_cols=None, use_AS=True,\n all_columns=False, gaia_mags=False, panstarrs1=False,\n query=None, defaults=None):\n\n ####################\n # Defaults\n\n defaults = _make_query_defaults(defaults)\n\n if use_AS is False: # replace with blank dict with asdict keys\n defaults['asdict'] = {k: '' for k in defaults['asdict']}\n\n # Start new query if one not provided\n if query is None:\n query = \"\"\n\n ####################\n # Building Selection\n\n # SELECT\n query += '--Data Columns:\\nSELECT\\n--GaiaDR2 Columns:\\n'\n query += defaults['gaia cols']\n\n if gaia_mags is True:\n query += ',\\n--GaiaDR2 Magnitudes and Colors:\\n'\n query += defaults['gaia mags']\n\n if all_columns is True:\n query += \",\\n--All Columns:\\n*\"\n\n if panstarrs1 is True:\n query += ',\\n--Adding PanSTARRS Columns:\\n'\n query += defaults['panstarrs cols']\n\n ####################\n # (Possible) User Input\n\n # Replacing {} with _asdict\n query = query.format(**defaults['asdict'])\n\n if user_cols is None:\n query += '\\n'\n elif not isinstance(user_cols, str):\n raise TypeError('user_sel is not a (str)')\n elif user_cols == '':\n query += '\\n'\n else:\n query += ',\\n\\n--Custom Selection & Assignement:'\n if user_cols[:1] != '\\n':\n user_cols = '\\n' + user_cols\n if user_cols[-1] == ',':\n user_cols = user_cols[:-1]\n query += user_cols\n\n ####################\n # Return\n return query", "def visit_query(self, query):\n return query", "def df_from_query(query, carto_sql_client, is_org_user, username,\n tablename=None, debug=False):\n if tablename:\n create_table = '''\n CREATE TABLE {tablename} As\n SELECT *\n FROM ({query}) As _wrap;\n SELECT CDB_CartodbfyTable('{org}', '{tablename}');\n '''.format(tablename=tablename,\n query=query,\n org=username if is_org_user else 'public')\n if debug: print(\"Creating table: {}\".format(create_table))\n resp = carto_sql_client.send(create_table)\n if debug: print(resp)\n new_tablename = resp['rows'][0]['cdb_cartodbfytable']\n table_resp = carto_sql_client.send(\n 'SELECT * FROM {tablename}'.format(tablename=new_tablename))\n if debug: 
print(table_resp)\n schema = transform_schema(table_resp['fields'])\n if table_resp['total_rows'] > 0:\n return pd.DataFrame(table_resp['rows']).set_index('cartodb_id').astype(schema)\n else:\n return pd.DataFrame(data=table_resp['rows'],\n columns=[k for k in table_resp['fields']],\n index=[]).astype(schema)\n else:\n resp = carto_sql_client.send(query)\n schema = transform_schema(resp['fields'])\n return pd.DataFrame(resp['rows']).astype(schema)\n\n return None", "async def summary_census(myquery: UserRequestModel):\n age = myquery.age\n class_of_worker = myquery.class_of_worker\n det_ind_code = myquery.industry_code\n det_occ_code = myquery.occupation_code\n marital_stat = myquery.marital_status\n major_ind_code = myquery.major_industry_code\n major_occ_code = myquery.major_occupation_code\n hisp_origin = myquery.hispanic_origin\n sex = myquery.sex\n age = str(age)\n det_ind_code = str(det_ind_code)\n det_occ_code = str(det_occ_code)\n filter_query = \"\"\"\n WITH data AS (\n WITH data_occ AS (\n WITH data_class AS(\n WITH person_total AS (\n WITH person_edu AS (\n WITH person_sex AS (\n WITH person_race AS (\n WITH person_hisp AS (\n SELECT p1.id_person, p1.age, p1.year, p1.marital_stat, p1.race, \n p1.education, p1.sex, hsp.hisp_origin FROM person_tbl as p1\n INNER JOIN hisp_origin_tbl as hsp ON hsp.id = p1.hisp_origin\n )\n SELECT r.race, p2.id_person, p2.age, p2.year, p2.marital_stat,\n p2.education, p2.hisp_origin, p2.sex FROM race_tbl as r \n INNER JOIN person_hisp as p2 ON p2.race = r.id\n )\n SELECT p3.id_person, p3.race, p3.age, p3.year, p3.education, p3.hisp_origin,\n p3.sex, ms.marital_stat FROM person_race AS p3\n INNER JOIN martial_status_tbl as ms ON ms.id = p3.marital_stat\n )\n SELECT p4.id_person, p4.race, p4.age, p4.year, p4.marital_stat, p4.education, \n p4.hisp_origin, sex_tbl.sex FROM person_sex AS p4\n INNER JOIN sex_tbl ON sex_tbl.id = p4.sex\n )\n SELECT p5.id_person, p5.race, p5.age, p5.year, p5.marital_stat, edu.education,\n p5.hisp_origin, p5.sex FROM person_edu as p5\n INNER JOIN education_tbl as edu ON edu.id = p5.education\n )\n SELECT p.id_person, p.race, p.age, p.year, p.marital_stat, p.education, p.hisp_origin, \n p.sex, e.det_occ_code, e.wage_per_hour, e.union_member, e.unemp_reason,\n e.own_or_self, e.weeks_worked, e.income_50k, e.class_worker FROM person_total AS p\n INNER JOIN employee_tbl as e ON e.id_person=p.id_person\n )\n SELECT dcl.id_person, dcl.race, dcl.age, dcl.year, dcl.marital_stat, dcl.education, dcl.hisp_origin,\n dcl.sex, dcl.wage_per_hour, dcl.union_member, dcl.unemp_reason, dcl.own_or_self,\n dcl.weeks_worked, dcl.income_50k, dcl.det_occ_code, cw.class_worker FROM data_class as dcl\n INNER JOIN class_worker_tbl as cw ON cw.id = dcl.class_worker\n )\n SELECT docc.id_person, docc.race, docc.age, docc.year, docc.marital_stat, docc.education, docc.hisp_origin,\n docc.sex, docc.wage_per_hour, docc.union_member, docc.unemp_reason, docc.own_or_self,\n docc.weeks_worked, docc.income_50k, mo.major_occ_code, mo.det_ind_code, docc.class_worker,\n docc.det_occ_code FROM data_occ as docc\n INNER JOIN det_occ_code_tbl as mo ON mo.det_occ_code = docc.det_occ_code\n )\n SELECT data.id_person, data.race, data.age, data.year, data.marital_stat, data.education, data.hisp_origin,\n data.sex, data.wage_per_hour, data.union_member, data.unemp_reason, data.own_or_self, data.class_worker,\n data.weeks_worked, data.income_50k, data.major_occ_code, mi.major_ind_code, \n data.det_ind_code, data.det_occ_code FROM data\n INNER JOIN det_ind_code_tbl as mi ON 
mi.det_ind_code = data.det_ind_code\n WHERE age = '{}'\"\"\".format(age)\n\n filter_query = filter_query + \" AND class_worker = '{}'\".format(class_of_worker)\n filter_query = filter_query + \" AND data.det_ind_code = '{}'\".format(det_ind_code)\n filter_query = filter_query + \" AND data.det_occ_code = '{}'\".format(det_occ_code) \n\n if None in [marital_stat, major_ind_code, major_occ_code, hisp_origin, sex]:\n if marital_stat is not None:\n filter_query = filter_query + \" AND marital_stat = '{}'\".format(marital_stat)\n if major_ind_code is not None:\n filter_query = filter_query + \" AND major_ind_code = '{}'\".format(major_ind_code)\n if major_occ_code is not None:\n filter_query = filter_query + \" AND major_occ_code = '{}'\".format(major_occ_code)\n if hisp_origin is not None:\n filter_query = filter_query + \" AND hisp_origin = '{}'\".format(hisp_origin)\n if sex is not None:\n filter_query = filter_query + \" AND sex = '{}'\".format(sex) \n\n table_query = filter_query + ';'\n query_to_csv = await database.fetch_all(query=table_query)\n\n # data_file = open('files/filtered_table.csv', 'w', newline='')\n data_file = io.StringIO()\n csv_writer = csv.writer(data_file)\n count = True\n for emp in query_to_csv:\n if count:\n header = emp.keys()\n csv_writer.writerow(header)\n count = False\n csv_writer.writerow(emp.values())\n # data_file.close()\n\n final_block = \"\"\")\n SELECT avg(wage_per_hour) as mean_wage, avg(weeks_worked) as mean_weeks_worked,\n min(wage_per_hour) as min_wage, min(weeks_worked) as min_weeks_worked,\n max(wage_per_hour) as max_wage, max(weeks_worked) as max_weeks_worked,\n sum(income_50k) as person_50k_plus, count(id_person) as num_person\n FROM filter;\"\"\"\n \n filter_query = 'WITH filter AS ( ' + filter_query\n filter_query = filter_query + final_block\n results = await database.fetch_all(query=filter_query)\n\n answer = {}\n for row in results:\n answer.update(dict(row))\n # with open('files/query.json', 'w') as outfile:\n # json.dump(answer, outfile)\n json_writer = json.dumps(answer)#, default=jsonDefault)\n \n # files = ['files/query.json', 'files/filtered_table.csv']\n file_names = ['query.json', 'filtered_table.csv']\n file_objects = [json_writer, data_file.getvalue().encode()]\n files = []\n i = 0\n for f in file_names:\n files.append((f, file_objects[i]))\n i += 1\n\n return zipfiles(files)", "def gws_q(self, query, attribute, data):\n \n gws_q = query.format(attribute, data) #attribute = SQL table\n \n query_df = db.query(gws_q) \n return query_df", "def make_query(self, query):\n return Transaction(self, query)", "def _fetch_sample_data_with_query_object(self) -> Query:\n return self.session.query(self.table).from_statement(\n text(f\"{self._profile_sample_query}\")\n )", "def create_named_query(Name=None, Description=None, Database=None, QueryString=None, ClientRequestToken=None):\n pass", "def createVisit(tx, query, personId, locationId, date, startHour, endHour):\n tx.run(query, personId=personId, locationId=locationId, date=date, startHour=startHour,\n endHour=endHour)" ]
[ "0.5525376", "0.5451804", "0.5393287", "0.5366259", "0.53469455", "0.53204435", "0.53188956", "0.53165925", "0.52957654", "0.5291086", "0.52879786", "0.52722937", "0.5265367", "0.5225464", "0.5224158", "0.51896924", "0.5150565", "0.5137202", "0.51339495", "0.5131162", "0.5131162", "0.5102349", "0.50810593", "0.5080404", "0.5078866", "0.5059333", "0.50538933", "0.5039947", "0.5033832", "0.4997745" ]
0.71407485
0
Given a list of keys and a number of splits find the keys to split on.
def _get_split_key(keys, num_splits): # If the number of keys is less than the number of splits, we are limited # in the number of splits we can make. if not keys or (len(keys) < (num_splits - 1)): return keys # Calculate the number of keys per split. This should be KEYS_PER_SPLIT, # but may be less if there are not KEYS_PER_SPLIT * (numSplits - 1) scatter # entities. # # Consider the following dataset, where - represents an entity and # * represents an entity that is returned as a scatter entity: # ||---*-----*----*-----*-----*------*----*----|| # If we want 4 splits in this data, the optimal split would look like: # ||---*-----*----*-----*-----*------*----*----|| # | | | # The scatter keys in the last region are not useful to us, so we never # request them: # ||---*-----*----*-----*-----*------*---------|| # | | | # With 6 scatter keys we want to set scatter points at indexes: 1, 3, 5. # # We keep this as a float so that any "fractional" keys per split get # distributed throughout the splits and don't make the last split # significantly larger than the rest. num_keys_per_split = max(1.0, float(len(keys)) / (num_splits - 1)) split_keys = [] # Grab the last sample for each split, otherwise the first split will be too # small. for i in range(1, num_splits): split_index = int(round(i * num_keys_per_split) - 1) split_keys.append(keys[split_index]) return split_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_split_points(cls, sorted_keys, shard_count):\n assert len(sorted_keys) >= shard_count\n index_stride = len(sorted_keys) / float(shard_count)\n return [sorted_keys[int(round(index_stride * i))]\n for i in range(1, shard_count)]", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def get_splits(datastore, query, num_splits, partition=None):\n\n # Validate that the number of splits is not out of bounds.\n if num_splits < 1:\n raise ValueError('The number of splits must be greater than 0.')\n\n if num_splits == 1:\n return [query]\n\n _validate_query(query)\n\n splits = []\n scatter_keys = _get_scatter_keys(datastore, query, num_splits, partition)\n last_key = None\n for next_key in _get_split_key(scatter_keys, num_splits):\n splits.append(_create_split(last_key, next_key, query))\n last_key = next_key\n\n splits.append(_create_split(last_key, None, query))\n return splits", "def make_train_val_test_split_inchikey_lists(train_inchikey_list,\n train_inchikey_dict,\n train_val_test_split_fractions,\n holdout_inchikey_list=None,\n splitting_type='random'):\n if not np.isclose([sum(train_val_test_split_fractions)], [1.0]):\n raise ValueError('Must specify train_val_test_split that sums to 1.0')\n\n if holdout_inchikey_list:\n # filter out those inchikeys that are in the holdout set.\n train_inchikey_list = [\n ikey for ikey in train_inchikey_list\n if ikey not in holdout_inchikey_list\n ]\n\n if splitting_type == 'random':\n return get_random_inchikeys(train_inchikey_list,\n train_val_test_split_fractions)\n else:\n # Assume that splitting_type is the name of a structure family.\n # get_inchikeys_by_family will throw an error if this is not supported.\n return get_inchikeys_by_family(\n train_inchikey_list,\n train_inchikey_dict,\n train_val_test_split_fractions,\n family_name=splitting_type,\n exclude_from_train=True)", "def qids_to_splits(qids):\n qidmap = {}\n i = 0\n for qid in qids:\n if not qid in qidmap:\n qidmap[qid] = i\n i+=1\n new_qids = []\n for qid in qids:\n new_qids.append(qidmap[qid])\n qidcount = np.max(new_qids)+1\n splits = [[] for i in range(qidcount)]\n for i, qid in enumerate(new_qids):\n splits[qid].append(i) \n return splits", "def _get_indices(self, parts: List[str], keys: List[str]):\n for key in keys:\n yield parts.index(key)", "def split_in_half(keys_56bits):\n left_keys, right_keys = keys_56bits[:28], keys_56bits[28:]\n return left_keys, right_keys", "def get_random_inchikeys(inchikey_list, train_val_test_split_fractions):\n random.shuffle(inchikey_list)\n\n train_num = int(train_val_test_split_fractions.train * len(inchikey_list))\n val_num = int(train_val_test_split_fractions.validation * len(inchikey_list))\n\n return TrainValTestInchikeys(inchikey_list[:train_num],\n inchikey_list[train_num:train_num + val_num],\n inchikey_list[train_num + val_num:])", "def isplit(iterable, splitters):\n return [list(g) for k,g in itertools.groupby(iterable,lambda x:x in splitters) if not k]", "def _tokens_partitions(tokens, min_number_of_tokens, number_of_partitions):\n if len(tokens) < min_number_of_tokens:\n # In this case we have few token and thus we split them\n tkns_per_partition = min_number_of_tokens / number_of_partitions\n step_size = ((2 ** 64) - 1) / min_number_of_tokens\n partition = []\n for fraction, to in tokens:\n while fraction < to - step_size:\n partition.append((fraction, 
fraction + step_size))\n fraction += step_size\n if len(partition) >= tkns_per_partition:\n yield partition\n partition = []\n # Adding the last token\n partition.append((fraction, to))\n if len(partition) > 0:\n yield partition\n else:\n # This is the case we have more tokens than partitions,.\n splits = max(len(tokens) / number_of_partitions, 1)\n\n for i in xrange(0, len(tokens), splits):\n yield tokens[i:i + splits]\n if len(tokens) % splits > 0:\n yield tokens[len(tokens) / splits * splits + 1:]", "def _getbundlelistkeysparts(\n bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs\n):\n listkeys = kwargs.get('listkeys', ())\n for namespace in listkeys:\n part = bundler.newpart(b'listkeys')\n part.addparam(b'namespace', namespace)\n keys = repo.listkeys(namespace).items()\n part.data = pushkey.encodekeys(keys)", "def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def group_by_keys(param_list, keys):\n\tkeys = list(keys)\n\tnames = {}\n\tfor p in param_list:\n\t\t\n\t\tif len(keys) > 0:\n\t\t\tkey = join_params(**{k: p.get(k, None) for k in keys})\n\t\t\t#vals = {k: p.get(k, None) for k in keys}\n\t\t\t#name = join_params(**vals)\n\t\t\t#names[name]=vals\n\t\telse:\n\t\t\tkey = ''\n\t\tif key in names:\n\t\t\tnames[key].append(p)\n\t\telse:\n\t\t\tnames[key]=[p]\n\treturn names", "def _split_dict(self, d, splits):\r\n ret = []\r\n for split in splits:\r\n dict_split = defaultdict(list)\r\n for f in split:\r\n if f in d:\r\n dict_split[f] = d[f]\r\n ret.append(dict_split)\r\n return ret", "def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]", "def _get_indices_split ( indices, number_of_folds ):\n # Split the indicies by the number of folds\n return np.array_split ( indices, indices_or_sections = number_of_folds )\n # End get_indices_split()", "def _get_indices_split ( indices, number_of_folds ):\n # Split the indicies by the number of folds\n return np.array_split ( indices, indices_or_sections = number_of_folds )\n # End get_indices_split()", "def split_fields(request, k):\n fields = re.split(u'[\\|\\/\\u015a]', unicode(request.strip()))\n return list(itertools.chain(fields, [None] * k))[:k]", "def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):\n groups = sorted(set(group_id for item_id, group_id in items_and_groups))\n rng = np.random.RandomState(split_number)\n rng.shuffle(groups)\n\n split_boundaries = _compute_split_boundaries(split_probs, len(groups))\n group_id_to_split = {}\n for split_name, i_start, i_end in split_boundaries:\n for i in range(i_start, i_end):\n group_id_to_split[groups[i]] = split_name\n\n split_to_ids = collections.defaultdict(set)\n for item_id, group_id in items_and_groups:\n split = group_id_to_split[group_id]\n split_to_ids[split].add(item_id)\n\n return split_to_ids", "def split_list(list_in,number_of_pieces):\n output_length = len(list_in) / number_of_pieces\n output = []\n piece = []\n counter = 0\n for list_item in list_in:\n counter += 1\n piece.append(list_item)\n if counter >= output_length:\n output.append(piece)\n counter = 0\n piece = []\n # Make sure nothing is missed\n if len(piece) > 0:\n output.append(piece)\n return output", "def splits(text, L=20):\n return [(text[:i+1], text[i+1:]) \n for i in range(min(len(text), 
L))]", "def partition_files(list_of_files, number_of_parts):\n return np.array_split(list_of_files, number_of_parts)", "def kfold_split(subj_ids, n_splits, groups=None, **kwargs):\n kfoldClass = KFold if groups is None else ShuffleGroupKFold\n kfold = kfoldClass(n_splits, shuffle=True, **kwargs)\n return [split for split in kfold.split(X=subj_ids, groups=groups)]", "def _get_scatter_keys(datastore, query, num_splits, partition):\n scatter_point_query = _create_scatter_query(query, num_splits)\n\n key_splits = []\n while True:\n req = datastore_pb2.RunQueryRequest()\n if partition:\n req.partition_id.CopyFrom(partition)\n\n req.query.CopyFrom(scatter_point_query)\n\n resp = datastore.run_query(req)\n for entity_result in resp.batch.entity_results:\n key_splits.append(entity_result.entity.key)\n\n if resp.batch.more_results != query_pb2.QueryResultBatch.NOT_FINISHED:\n break\n\n scatter_point_query.start_cursor = resp.batch.end_cursor\n scatter_point_query.limit.value -= len(resp.batch.entity_results)\n\n key_splits.sort(helper.key_comparator)\n return key_splits", "def smart_split(strokes):\n\n splited = []\n for stroke in strokes:\n splited += stroke.split_non_differentiable_points()\n return splited", "def get_inchikeys_by_family(inchikey_list,\n inchikey_dict,\n train_val_test_split_fractions,\n family_name='steroid',\n exclude_from_train=True):\n _, val_fraction, test_fraction = train_val_test_split_fractions\n if val_fraction == 0.0 and test_fraction == 0.0:\n val_fraction = 0.5\n test_fraction = 0.5\n\n substructure_filter_fn = feature_utils.make_filter_by_substructure(\n family_name)\n family_inchikeys = []\n nonfamily_inchikeys = []\n\n for ikey in inchikey_list:\n if substructure_filter_fn(inchikey_dict[ikey][0]):\n family_inchikeys.append(ikey)\n else:\n nonfamily_inchikeys.append(ikey)\n\n if exclude_from_train:\n val_test_inchikeys, train_inchikeys = (family_inchikeys,\n nonfamily_inchikeys)\n else:\n train_inchikeys, val_test_inchikeys = (family_inchikeys,\n nonfamily_inchikeys)\n\n random.shuffle(val_test_inchikeys)\n val_num = int(\n val_fraction / (val_fraction + test_fraction) * len(val_test_inchikeys))\n return TrainValTestInchikeys(train_inchikeys, val_test_inchikeys[:val_num],\n val_test_inchikeys[val_num:])", "def split_list(self):\n wanted_parts = self.args.ncore\n alist = glob.glob(self.args.input + '*.root')\n length = len(alist)\n return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]\n for i in range(wanted_parts)]", "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]" ]
[ "0.6757466", "0.6268931", "0.6221212", "0.6128692", "0.5942825", "0.588038", "0.5819608", "0.58183414", "0.5790286", "0.57337314", "0.57150143", "0.56872034", "0.56827277", "0.56672984", "0.5664926", "0.5642625", "0.56417096", "0.56417096", "0.5603416", "0.55761635", "0.5556821", "0.5537703", "0.55271125", "0.55245924", "0.5522117", "0.5519939", "0.55103904", "0.55045116", "0.5487179", "0.54690945" ]
0.72753096
0
Board must count number of connections to origin properly.
def test_count_connected(self): data = [[0, 1, 0], [1, 0, 0], [0, 0, 1]] board = Board(data) self.assertEquals(board.count_connected(), 1) data = [[1, 1, 0], [1, 0, 0], [0, 0, 1]] board = Board(data) self.assertEquals(board.count_connected(), 3) data = [[0, 0, 0], [0, 0, 0], [0, 0, 1]] board = Board(data) self.assertEquals(board.count_connected(), 8) data = [[1, 1, 1], [1, 1, 1], [1, 1, 1]] board = Board(data) self.assertEquals(board.count_connected(), 9)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\r\n return len(self.board)", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def _board_is_full(self):\n return (self.get_counts()[0] + self.get_counts()[1] == self._num_rows * self._num_cols)", "def init_board(self, size):\n # One entry for every node, if diamond all will be filled with pieces, if triange half of matrix including \n # diagonal from top left to bottom right will be filled\n self.board = [[False for i in range(size)] for j in range(size)] \n\n # One entry for every node pair (i, j), where cM(i, j) = direction enum if there is a connection from i to j. \n # (i, i) does not have a connection\n self.connection_matrix = [[False for i in range(size*size)] for j in range(size*size)]\n if self.shape == ShapeType.DIAMOND:\n for node_i in range(size*size):\n top_boundry = node_i < size # Check if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n \n # See docs/Diamond_Connection_Matrix.png for visualization\n if not top_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not top_boundry and not right_boundry:\n self.connection_matrix[node_i][node_i-size+1] = DirectionType.RIGHT\n if not right_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not bottom_boundry and not left_boundry:\n self.connection_matrix[node_i][node_i+size-1] = DirectionType.LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.UP_LEFT\n \n elif self.shape == ShapeType.TRIANGLE:\n for node_i in range(size*size):\n # check if node_i is in the empty triangle. 
\n # No proof for this but some sketching suggested the formula, and the formula worked with empirical testing\n # for many different sizes\n # == gives on diagonal to the right of main diagonal through matrix, greater gives the numbers on the rest of the row\n # basic intuition: size-node_i//size-1 gives how many of the nodes on a row in the board matrix are empty, \n # and the rest checks if the node_i is in such an area\n triangle_check = node_i%size >= size - (size - node_i//size - 1) \n if triangle_check: # If it is in the empty side there should be no connections so skip ahead\n continue\n\n top_boundry = node_i < size # Checks if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n diagonal_boundry = node_i%(size+1) == 0 # Check if node is on diagonal in board\n\n # See docs/Triangle_Connection_Matrix.png for visualization\n if not top_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not right_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.RIGHT\n if not right_boundry and not bottom_boundry:\n self.connection_matrix[node_i][node_i+size+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.LEFT\n if not left_boundry and not top_boundry:\n self.connection_matrix[node_i][node_i-size-1] = DirectionType.UP_LEFT", "def initBoard(self):\n pass", "def init_safeboard() ->None:\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n safeboard[x, y] = 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n self.board = copy.deepcopy(board)\n self.rows = len(self.board)\n self.cols = len(self.board[0])\n for i in range(self.rows):\n for j in range(self.cols):\n neighbors = self.count_neighbors(i, j)\n if board[i][j] == 1:\n if neighbors < 2 or neighbors > 3:\n board[i][j] = 0\n else:\n if neighbors == 3:\n board[i][j] = 1", "def corners(self, board):\n # Calculating already captured corners\n computer_corners = 0\n computer_corners = computer_corners + 1 if board[0][0] == self.computer_num else computer_corners\n computer_corners = computer_corners + 1 if board[0][\n self.board_size - 1] == self.computer_num else computer_corners\n computer_corners = computer_corners + 1 if board[self.board_size - 1][\n 0] == self.computer_num else computer_corners\n computer_corners = computer_corners + 1 if board[self.board_size - 1][\n self.board_size - 1] == self.computer_num else computer_corners\n\n opponent_corners = 0\n opponent_corners = opponent_corners + 1 if board[0][0] == self.opponent_num else opponent_corners\n opponent_corners = opponent_corners + 1 if board[0][\n self.board_size - 1] == self.opponent_num else opponent_corners\n opponent_corners = opponent_corners + 1 if board[self.board_size - 1][\n 0] == self.opponent_num else opponent_corners\n opponent_corners = opponent_corners + 1 if board[self.board_size - 1][\n self.board_size - 1] == self.opponent_num else opponent_corners\n\n # Calculating potential corners\n valid_moves_computer = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n computer_potential_corner = 0\n 
computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[0][\n 0] == 1 else computer_potential_corner\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[0][\n self.board_size - 1] == 1 else computer_potential_corner\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[self.board_size - 1][\n 0] == 1 else computer_potential_corner\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[self.board_size - 1][\n self.board_size - 1] == 1 else computer_potential_corner\n\n valid_moves_opponent = self.game.find_valid_moves(self.opponent_color, board, self.board_size)\n opponent_potential_corner = 0\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[0][\n 0] == 1 else opponent_potential_corner\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[0][\n self.board_size - 1] == 1 else opponent_potential_corner\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[self.board_size - 1][\n 0] == 1 else opponent_potential_corner\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[self.board_size - 1][\n self.board_size - 1] == 1 else opponent_potential_corner\n\n # Calculating potential corners for both players\n valid_moves = valid_moves_opponent + valid_moves_computer\n common_potential_corner = 0\n common_potential_corner = common_potential_corner + 1 if valid_moves[0][\n 0] == 2 else common_potential_corner\n common_potential_corner = common_potential_corner + 1 if valid_moves[0][\n self.board_size - 1] == 1 else common_potential_corner\n common_potential_corner = common_potential_corner + 1 if valid_moves[self.board_size - 1][\n 0] == 2 else common_potential_corner\n common_potential_corner = common_potential_corner + 1 if valid_moves[self.board_size - 1][\n self.board_size - 1] == 2 else common_potential_corner\n computer_potential_corner -= common_potential_corner\n opponent_potential_corner -= common_potential_corner\n\n numerator = computer_corners + computer_potential_corner - common_potential_corner - opponent_corners - opponent_potential_corner\n denominator = computer_corners + computer_potential_corner + common_potential_corner + opponent_corners \\\n + opponent_potential_corner\n if denominator == 0:\n return 0\n return 100 * numerator / denominator", "def _check_integrity(self):\n\n count = 0\n for (x, y) in self.__players[ChessGame.BLACK].union(\n self.__players[ChessGame.WHITE]):\n assert (x, y) in self.__board\n count += 1\n\n assert count == len(self.__board)", "def next_boards_connectfour(board) :\n boards=[]\n for i in range(7):\n if not board.is_column_full(i) and not is_game_over_connectfour(board):\n boards.append(board.add_piece(i))\n return boards\n #raise NotImplementedError", "def corners_player(self, board):\n valid_moves = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n rows, columns = np.where(valid_moves == 1)\n max_corners = -200\n location = (-2, -2)\n for i in range(len(rows)):\n temp_board = np.copy(board)\n temp_board = self.game.flip_opponent_stones((rows[i], columns[i]), temp_board, self.board_size,\n self.computer_num, self.opponent_num)\n corners_value = self.stone_parity(temp_board)\n if corners_value > max_corners:\n max_corners = corners_value\n location = (rows[i], columns[i])\n return location", "def test_board_not_full(self):\n\n self.controller.model.board[0][0] = '-'\n actual = 
self.controller.check_tie()\n self.assertFalse(actual)", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def checkBoardValid(self):\n for i in range(9):\n for j in range(9):\n if self.board[i, j] == 0:\n continue\n\n if not self.isPossibleAssign((i, j), self.board[i, j]):\n return False\n\n return True", "def check_boards(self):\n succesful = True\n marker = self.game.player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != -10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n marker = self.game.ai_player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != 10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n \n tie_boards = [\n [ \n [\"O\",\"O\",\"X\"],\n [\"X\",\"O\",\"O\"],\n [\"X\",\"X\",\" \"]\n ],\n [\n [\"O\",\"X\",\" \"],\n [\" \",\"X\",\" \"],\n [\" \",\"O\",\" \"]\n ],\n [\n ['O', 'O', 'X'],\n ['X', 'X', 'O'],\n ['O', 'O', 'X']\n ]\n ]\n for board in tie_boards:\n if self.game.check_win_conditions(board) != 0:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n\n print(f\"-----Ending check_winning_boards-----\")", "def set_board(board):", "def __generate_goal_board(self):\n element = 1\n array = []\n\n for row in range(self._n):\n row_to_append = []\n for col in range(self._n):\n row_to_append.append(element)\n element += 1\n array.append(row_to_append)\n\n array[self._n - 1][self._n - 1] = 0\n self._solved_board = Board(array=array, space=[self._n - 1, self._n - 1])", "def __init__(self, python_board: list[list[int]] = None, red_active: bool = True) -> None:\n\n game_board = [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n if python_board is not None:\n self.board_array = np.array(python_board)\n else:\n self.board_array = np.array(game_board)\n\n self.move_number = 0\n\n # Creating the kernels to use in a 2d convolution to check the board for a winner later\n across = np.array([[1, 1, 1, 1]])\n vertical = np.transpose(across)\n main_diagonal = np.eye(4, dtype=np.uint8)\n off_diagonal = 
np.fliplr(main_diagonal)\n self._detection_kernels_red = [across, vertical, main_diagonal, off_diagonal]\n self._detection_kernels_yellow = [kernel * -1 for kernel in self._detection_kernels_red]\n\n self._is_red_active = red_active\n\n # Matches moves to their indices in self._valid_moves, this order is very important\n # for optimising alpha-beta pruning\n self._valid_move_order = {3: 0, 2: 1, 4: 2, 5: 3, 1: 4, 0: 5, 6: 6}\n self._valid_moves = [3, 2, 4, 5, 1, 0, 6]\n self._column_to_row = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n\n self._win_state = None\n\n # This code reads in the hash keys for use in Zobrist hashing, for more information, see\n # opening_book_gen.py\n red_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_red_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n red_hash_keys.append([int(r) for r in row])\n self._red_hash_keys = np.array(red_hash_keys)\n\n yellow_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_yellow_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n yellow_hash_keys.append([int(r) for r in row])\n self._yellow_hash_keys = np.array(yellow_hash_keys)\n\n self.hash = 0", "def checkNumNeighbors():", "def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])", "def gameOfLife(self, board: List[List[int]]) -> None:\n # copy matrix\n copy_matrix = [[board[row][col] for col in range(len(board[0]))] for row in range(len(board))]\n \n # 8 possible directions\n directions = [(0,1), (0, -1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)]\n num_rows = len(board)\n num_cols = len(board[0])\n \n # matrix traversal\n for i in range(0, num_rows):\n for j in range(0, num_cols):\n # for each cell, explore all of its neighboring cells\n num_live_cells = 0\n for direction in directions:\n r = i + direction[0]\n c = j + direction[1]\n # make sure if it is a live cell \n if (r < num_rows and r >=0) and (c < num_cols and c>=0) and (copy_matrix[r][c]==1):\n # if it is live cell, increment live_cell_count\n num_live_cells +=1\n # if here: We now have estimate of surrounding live cells\n # start applying rules \n # Rule-1: Any live cell with fewer than 2 live neighbors die\n # Rule-2: Any live cell with 2/3 live neighbors live up\n # Rule-3: Any Live cell with > 3 live neighbors die\n # Rule-4: Any dead cell with ==3 live neighbors becomes alive\n if copy_matrix[i][j] == 1 and (num_live_cells > 3 or num_live_cells < 2):\n # Rule-1 and Rule-3: So the current cell dies...\n board[i][j] = 0\n if copy_matrix[i][j] == 0 and num_live_cells == 3:\n # Rule-4: Dead becomes alive\n board[i][j] = 1\n # Rule-2 is taken care by default.", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n mat = [row[:] for row in board] #original copy of the board\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in 
directions:\n if valid(i+direc[0],j+direc[1]):\n if mat[i+direc[0]][j+direc[1]]==1:\n cnt_live+=1\n if mat[i][j]==1 and cnt_live<2 or mat[i][j]==1 and cnt_live>3:\n board[i][j]=0\n elif mat[i][j]==1 and 2<=cnt_live<=3 or mat[i][j]==0 and cnt_live==3:\n board[i][j]=1", "def get_size(self):\n return len(self.board)", "def solve(board) -> None:\n rows = len(board)\n if rows==0:\n return board\n cols = len(board[0])\n \n def is_border(rc):\n (rr, cc) =rc\n if rr<rows and rr< cols and rr>=0 and cc>=0 and board[rr][cc]=='O' and (rr==0 or rr==rows-1 or cc==0 or cc==cols-1):\n return True\n return False\n \n transf = []\n for r in range(rows):\n for c in range(cols):\n if board[r][c]=='O' and not is_border((r,c)) and not any(map(is_border, [(r-1, c), (r+1, c), (r, c-1), (r, c+1)])):\n transf.append((r,c))\n if transf:\n for r,c in transf:\n board[r][c]='X'\n return board", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. 
If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def gameOfLife(self, board: List[List[int]]) -> None:\n rows = len(board)\n cols = len(board[0])\n if not rows or not cols:\n return board\n neighbors = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\n\n def no_of_live_neighbors(x, y):\n count = 0\n for dx, dy in neighbors:\n if 0 <= x + dx <= rows - 1 and 0 <= y + dy <= cols - 1:\n if abs(board[x + dx][y + dy]) == 1:\n count += 1\n return count\n\n for i in range(rows):\n for j in range(cols):\n live_neighbours = no_of_live_neighbors(i, j)\n if board[i][j] == 0 and live_neighbours == 3:\n board[i][j] = 2\n if board[i][j] == 1 and (live_neighbours < 2 or live_neighbours > 3):\n board[i][j] = -1\n for i in range(rows):\n for j in range(cols):\n if board[i][j] > 0:\n board[i][j] = 1\n else:\n board[i][j] = 0\n\n return board", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def on_board(self, pos):\n i, j = pos\n return 0 <= i < COLS and 0 <= j < ROWS", "def test_get_board(self):\n copy1 = self.game.get_board()\n self.assertEqual(copy1._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n copy2 = self.game.get_board()\n self.assertEqual(copy2._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n copy3 = self.game.get_board()\n self.assertEqual(copy3._board, self.game._board)", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else --> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n 
continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change" ]
[ "0.7049893", "0.6507776", "0.6334881", "0.63287544", "0.63064605", "0.62994397", "0.62831104", "0.6227796", "0.61846995", "0.61787885", "0.6168326", "0.6165085", "0.61543095", "0.6147843", "0.6074171", "0.6044323", "0.6024515", "0.6007123", "0.60047996", "0.5995366", "0.59897405", "0.59814143", "0.5975961", "0.5967561", "0.59665185", "0.59654844", "0.5965234", "0.5956529", "0.5955018", "0.5933343" ]
0.6705546
1
Load the dictionary from the filename
def load_dictionary(cls, filename, non_lang_syms=None):
    return AsrDictionary.load(filename, f_non_lang_syms=non_lang_syms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_dictionary(filename):\n filename = os.path.join(FILE_DIR, 'assets/obj/' + filename)\n try:\n with open(filename, 'rb') as input:\n return pickle.load(input)\n except Exception as e:\n print(\"exception\", e)", "def load_dictionary(filepath):\r\n # context manager read binary\r\n with open(filepath, 'rb') as file:\r\n # pickle load\r\n return pickle.load(file)", "async def load(self, file: IO) -> dict:", "def load_dictionary(cls, args, filename, source=True):\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol(\"<mask>\")\n return dictionary", "def load(filename):\n\n print \"Loading dictionary...\"\n dictionary = Dictionary()\n print \" Loading file...\"\n whole_file = file(filename).read().upper()\n print \" Splitting file...\"\n words = whole_file.split()\n print \" Removing unsuitable words...\"\n words = dictionary.remove_unsuitable_words(words)\n print \" Building data structures...\"\n dictionary.set_words(words)\n\n print \" Loaded %d words\" % len(dictionary.words)\n print \" Unique letter size:\"\n print \" No blanks: %d\" % len(dictionary.letters_map)\n print \" One blank: %d\" % len(dictionary.letters_map_one_blank)\n print \" Two blanks: %d\" % len(dictionary.letters_map_two_blanks)\n\n return dictionary", "def load_dict(name, path):\n ret_dict = {}\n with open(os.path.join(path, name) + '.pkl', 'rb') as f:\n while True:\n try:\n pair = pickle.load(f)\n ret_dict[pair[0]] = pair[1]\n except:\n return ret_dict", "def load_dictionary(dictionary_file):\n global dictionary\n logging.info('loading saved abstracts from {0}'.format(dictionary_file))\n with open(dictionary_file, 'r') as f:\n for line in f:\n j = json.loads(line)\n dictionary.update(j)", "def LoadDictFile(file,dict_,cast_type):\n\twith open(file,'r') as f:\n\t\tfor line in f:\n\t\t\tline = line.rstrip()\n\t\t\tlst = line.split('=')\n\t\t\tdict_[cast_type(lst[1])] = lst[0]", "def load(self, file=\"setup\", path=\"settings\"):\n\n # check if filename already contains file extension, if not, add it\n if file[-5:] != '.json':\n file += '.json'\n # load mappings from file\n with open(os.path.join(path, file), 'r') as file:\n self.data = json.load(file)", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def file_read(filename):\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n\n d = dict(eval(d_str))\n return d", "def _load_file(self, file_path: str) -> dict:\n raise NotImplementedError()", "def load(self, s):\n self._filename = s\n # self._isLoaded = True\n with open(s, 'r') as f:\n self._dict = json.load(f)", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def loadIdMap(self, filename:str) -> None :\n if(not isinstance(filename,str)):\n raise TypeError(\"filename must be a string but %s was passed\"%str(type(filename)))\n if(not os.path.exists(filename) or not os.path.isfile(filename)):\n raise ValueError(\"invalid filename\")\n\n self.idMap = self.ioutil.loadKeysVals(filename, \";\")", "def shelve_load(file_name: str, *args):\n res = {}\n with shelve.open(os.path.splitext(file_name)[0]) as db:\n for k, v in db.items():\n res[k] = v\n return res", "def load(self, filename):\n _json = self.read_json(filename, byteify=True)\n _json = self._byteify(_json, ignore_dicts=True)\n if not _json:\n return 
None\n _dict = {k : self._parse_value(v) for k, v in _json.items()}\n return _dict", "def load_from_file(self, filename):\n # clear datastore mape\n self._datastoreMap = {}\n # citanje filea\n with open(filename, 'rb') as f:\n binstr = f.read()\n inMap = pickle.loads(binstr)\n # za svaki kanal moramo dodati element u _datastoreMap\n for kanal in inMap:\n # stvaramo instancu Datastore\n self._datastoreMap[kanal] = DataStore()\n # instanca Datastore zna se otpakirati iz mape (dictionary)\n self._datastoreMap[kanal].dict2store(inMap[kanal])", "def load(self, filename):\n raise NotImplementedError", "def fromfilename(cls, filename):\n data = LazyDict(fn=filename)\n return cls(data)", "def dictionnary():\n try:\n file = open(file_name,\"r\")\n except:\n print(\"That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'\")\n return\n dictionnary = True", "def load_mapping(filename):\n with open(filename + '.pkl', 'rb') as handle:\n return pickle.load(handle)", "def from_file(cls, basename, *args, **keys):\n log.verbose(\"Loading mapping\", repr(basename), verbosity=55)\n path = keys.get(\"path\", None)\n if path:\n filename = os.path.join(path, os.path.basename(basename))\n basename = filename\n else:\n filename = config.locate_mapping(basename)\n text = utils.get_uri_content(filename)\n return cls.from_string(text, basename, *args, **keys)", "def load_lookup_dicto():\n with open(LOOKUP_PATH, 'rb') as f:\n lookup_dicto = pickle.load(f)\n return lookup_dicto", "def from_file(cls, file_name):\n\n with open(file_name, 'r') as fi:\n the_dict = json.load(fi)\n return cls.from_dict(the_dict)", "def _load_dictionary(dict_name: str, file: h5py.File) -> Dict:\n d = {}\n g = file[dict_name]\n for k in g:\n if \"finish_time\" in k or \"start_time\" in k:\n # need to decode byte-string into datetime object\n try:\n date_time_string = g[k][()].decode('UTF-8') # convert bye string to string\n except AttributeError:\n # it is already a unicode string\n date_time_string = g[k][()]\n d[k] = datetime.strptime(date_time_string, \"%m/%d/%Y %I:%M:%S %p\")\n else:\n d[k] = g[k][()]\n return d", "def _load_dict(self, dict_name=None):\n if dict_name is None:\n for name in self.dict_names:\n self._load_dict(name)\n else:\n dict_idx = self.dict_names.index(dict_name)\n if not os.path.exists(self.dict_files[dict_idx]):\n self.logger.warn(\"Not exists %s for %s\" % (\n self.dict_files[dict_idx], dict_name))\n else:\n dict_map = self.dicts[dict_idx]\n id_to_vocab_dict_map = self.id_to_vocab_dict_list[dict_idx]\n if dict_name != self.DOC_LABEL:\n dict_map[self.VOCAB_PADDING] = 0\n dict_map[self.VOCAB_UNKNOWN] = 1\n dict_map[self.VOCAB_PADDING_LEARNABLE] = 2\n id_to_vocab_dict_map[0] = self.VOCAB_PADDING\n id_to_vocab_dict_map[1] = self.VOCAB_UNKNOWN\n id_to_vocab_dict_map[2] = self.VOCAB_PADDING_LEARNABLE\n\n for line in open(self.dict_files[dict_idx], \"r\"):\n vocab = line.strip(\"\\n\").split(\"\\t\")\n dict_idx = len(dict_map)\n dict_map[vocab[0]] = dict_idx\n id_to_vocab_dict_map[dict_idx] = vocab[0]", "def load_json(self, filename):\n with open(filename, 'r', encoding='utf-8') as f:\n data_dict = json.load(f)\n return data_dict", "def unpickle(filename: str) -> dict:\n with open(os.path.join(f\"{ROOT_DIR}/dataset/\", filename), \"rb\") as file:\n dict = pickle.load(file, encoding=\"bytes\")\n return dict", "def read(self,filename):\n with open(str(filename),\"r\") as f:\n data = f.read()\n #check if the loaded file is json\n try:\n datajson = json.loads(data)\n except Exception as 
e:\n if mer == True:\n merrors.error('could not load '+str(filename)+', add a basic entry to the config like {\"name\":\"Example\"}. Python error: '+str(e))\n quit()\n else:\n print(\"could not load \"+str(filename)+\". Python error: \"+str(e))\n quit()\n self.datajson = datajson\n self.filename = filename\n f.close()" ]
[ "0.79066867", "0.7503246", "0.74313736", "0.7323459", "0.71952224", "0.71250665", "0.7083375", "0.70716083", "0.70621836", "0.7057795", "0.70570594", "0.7048259", "0.6998082", "0.6897135", "0.6888102", "0.68791723", "0.6873016", "0.6848805", "0.6822207", "0.6801678", "0.6796956", "0.6786406", "0.67595047", "0.67481923", "0.67447156", "0.674234", "0.67272484", "0.6716601", "0.6696447", "0.6687575" ]
0.75451654
1
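For comparison with the record above: the positive document is a thin classmethod wrapper that delegates to an external AsrDictionary.load call, while the negatives show the more generic pickle/JSON loaders. A minimal, self-contained sketch of the same classmethod-loader pattern — the class name, JSON format and constructor below are illustrative assumptions, not the actual fairseq/Espresso API:

import json


class SymbolDictionary:
    """Toy dictionary type with a file-based constructor (illustrative only)."""

    def __init__(self, mapping, non_lang_syms=None):
        self.mapping = mapping                    # e.g. {"<unk>": 0, "hello": 1}
        self.non_lang_syms = non_lang_syms or []  # symbols exempt from normal tokenization

    @classmethod
    def load_dictionary(cls, filename, non_lang_syms=None):
        # Read a {symbol: index} mapping from a JSON file and wrap it,
        # mirroring the delegate-to-a-loader shape of the record's document.
        with open(filename, "r", encoding="utf-8") as f:
            mapping = json.load(f)
        return cls(mapping, non_lang_syms=non_lang_syms)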
View used when Nginx Upload Progress module is enabled. When the Nginx Upload Progress module is used, we get to the WSGI application (and thus to this view) only when the file has been completely written to disk by Nginx. So here we merely copy it to the final upload directory and that is all.
def upload_with_nginx_upload_progress(request):
    input_file, file_size, filename = get_file_from_request(request)
    upload_dir = request.registry.settings['poulda.upload_dir']
    path = os.path.join(upload_dir, filename)
    with open(path, 'w') as output:
        # We must read only 'file_size' bytes from the 'input_file',
        # not all of it since it also contains the MIME boundary.
        copy_to_file(input_file, file_size, output)
    return HTTPFound(location='success')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload(request):\n # We pass the 'file_id' in the query string as a GET parameter. If\n # we read it from the POSTed data, WebOb would read all POSTed\n # data, which has various features and traps (like setting the\n # \"Content-Length\" header to 0) that we do not need since we are\n # going to read the data ourselves anyway.\n file_id = request.GET['X-Progress-ID']\n input_file, file_size, filename = get_file_from_request(request)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n upload_dir = request.registry.settings['poulda.upload_dir']\n user_id = authenticated_userid(request)\n # We use a temporary path to detect unfinished uploads (post\n # mortem, not in the application itself).\n path = os.path.join(upload_dir, '_'.join((user_id, file_id)))\n u.tmp_path = path\n u.started = int(time.time())\n u.size = file_size\n u.state = u'uploading'\n session.flush()\n # We need to commit the transaction so that changes to the Upload\n # object can be seen by the other threads (which will serve the\n # 'progress' JSON view called by the upload page).\n transaction.commit()\n with open(path, 'w') as output:\n # We must read only 'file_size' bytes from the 'input_file',\n # not all of it since it also contains the MIME boundary.\n copy_to_file(input_file, file_size, output)\n final_path = filename[1 + filename.rfind(os.sep):]\n final_path = os.path.join(upload_dir, final_path)\n os.rename(path, final_path)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n u.state = u'done'\n u.final_path = unicode(final_path, 'utf-8')\n return HTTPFound(location='success')", "def progress(request):\n file_id = request.GET['X-Progress-ID']\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n data = {'state': u.state}\n if u.state == 'uploading':\n if not os.path.exists(u.tmp_path):\n # The temporary file has not been created yet or it has\n # already been renamed. 
We return 0 in both case, the\n # front-end code will know what to do.\n received = 0\n else:\n received = os.stat(u.tmp_path).st_size\n data.update({'size': u.size, 'received': received})\n return data", "def upload_progress(request):\n filename = request.COOKIES['sessionid']\n tmpfile = os.path.join(tempfile.gettempdir(), filename)\n try:\n f = open(tmpfile, 'r')\n data = f.readlines()\n f.close()\n except Exception, e:\n data = \"{'uploaded': '1', 'total': '1', finished: true}\"\n\n return HttpResponse(data)", "def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)", "def upload_progress(self, cloud_file, size, uploaded):", "def upload_add_progress(self, nbytes):\n\n self.send_cur_nbytes += nbytes\n if self.send_goal_nbytes != 0:\n self.republish_output()", "def upload_file_view(user_data, cache):\n return UploadFilesCtrl(cache, user_data, request).to_response()", "def upload_file_view(user_data, cache):\n return UploadFilesCtrl(cache, user_data, request).to_response()", "def upload_index(self):\n return self.render(\"cadmin/upload.html\", connector=self.connector)", "def show_copy_status(self, source):\n self.view.show_message('Copy {}...'.format(os.path.basename(source)))\n self.copy_count += 1\n process = int(\n (float(self.copy_count) / float(self.total_copy_amount)) * 100)\n self.view.progress_bar.setValue(process)", "def render_successful_upload(request):\n torrent_file = request.files['file']\n filename = secure_filename(torrent_file.filename)\n username = request.form['username']\n save_torrent_file(filename, torrent_file, request.form['username'])\n dl_success = start_torrent_download(filename)\n update_download_info()\n return render_template('successful_upload.html', \n username=username, \n filename=filename, \n download_success=dl_success, \n downloads=downloads)", "def send_head(self):\n path = self.translate_path(self.path)\n if os.path.basename(path).startswith(UPLOAD_LINK):\n return self.render_upload_form(os.path.dirname(self.path))\n return super(SimpleHTTPRequestHandlerWithUpload, self).send_head()", "def index():\n return render_template(\"upload.html\")", "def copy_progress(self) -> Sequence['outputs.CopyProgressResponse']:\n return pulumi.get(self, \"copy_progress\")", "def copy_progress(self) -> Sequence['outputs.CopyProgressResponse']:\n return pulumi.get(self, \"copy_progress\")", "def copy_progress(self, percentage_complete, filecount, filecomplete):\n ##TODO: display the current transfer rate\n ##TODO: display the current file being transferred and possibly the progress thereof.\n ##Perhaps use the statusbar method for this\n self.progress.setValue(int(percentage_complete))", "def transfer_progress(self, stats):", "def upload():\n\n file = request.files['query']\n filepath = upload_filepath(secure_filename(file.filename))\n file.save(filepath)\n classification = classify(filepath)\n classification['filename'] = file.filename\n return render_template('index.html', classification=classification)", "def upload_file():\n retVal = None \n if request.method == 'POST' and upload_validated(request):\n retVal = render_successful_upload(request) \n else:\n retVal = render_index()\n return retVal", "def upload_progress(request):\n progress_id = ''\n if 'X-Progress-ID' in request.GET:\n progress_id = request.GET['X-Progress-ID']\n elif 
'X-Progress-ID' in request.META:\n progress_id = request.META['X-Progress-ID']\n if progress_id:\n cache_key = \"%s_%s\" % (request.META['REMOTE_ADDR'], progress_id)\n data = cache.get(cache_key)\n return HttpResponse(simplejson.dumps(data))\n else:\n return HttpResponseServerError('Server Error: You must provide X-Progress-ID header or query param.')", "def completed_file(self, context):", "def get(self):\n path = os.path.join(os.path.dirname(__file__), '../pages/upload_information.html')\n self.response.out.write(template.render(path, {}))", "def download_progress(self, cloud_file, size, downloaded):", "def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)", "def upload():\n global FILE_NAME\n target = os.path.join(APP_ROOT, \"images\")\n print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n FILE_NAME = destination\n file.save(destination)\n return render_template(\"complete.html\")", "def handle_uploaded_file(f):\n path = settings.ABS_PATH + \"Server_data_visualization/uploads/executable\"\n destination = open(path, \"wb+\")\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n # os.chmod(path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)", "def upload(request):\n # handle only POSTed Data\n if request.method == 'POST':\n form = UploadProjectForm(request.POST, request.FILES)\n # validate form based on form definition\n if form.is_valid():\n project = form.save(commit=False)\n project.user = request.user\n pj_dir = os.path.join(MEDIA_ROOT, \"projects\", request.user.username, form.cleaned_data['title'])\n project.save()\n pathlib.Path(pj_dir).mkdir(parents=True, exist_ok=True)\n print(project.upload_file.path)\n zip_path = os.path.join(MEDIA_ROOT, project.upload_file.path)\n project.index_path = os.path.join(MEDIA_URL, 'projects', request.user.username,\n form.cleaned_data['title'], 'index.html')\n project.save()\n with zipfile.ZipFile(zip_path) as f:\n f.extractall(pj_dir)\n return redirect('physics:index')\n else:\n form = UploadProjectForm()\n return render(request, 'upload/upload.html', {'form': form})", "def add_progress():\n task_id = request.args.get('tid')\n return render_template('progress.html', task_id=task_id) if task_id else redirect('/')", "def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % 
status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()", "def chunk_upload_to(instance, filename):\n return os.path.join(FLOWJS_PATH, instance.filename)" ]
[ "0.63862", "0.6194678", "0.6183757", "0.58975005", "0.5862023", "0.58206344", "0.57981217", "0.57981217", "0.5738606", "0.55839986", "0.55655915", "0.5544447", "0.55011976", "0.5443568", "0.5443568", "0.54429203", "0.5387098", "0.53435814", "0.5304923", "0.5290966", "0.5274816", "0.5272419", "0.52682716", "0.524073", "0.5240444", "0.5237984", "0.52324253", "0.52265894", "0.5225377", "0.5222546" ]
0.7100043
0
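The upload view above (and the longer upload view among its negatives) call a copy_to_file(input_file, file_size, output) helper that is not included in the records; its contract is only implied by the comment about reading exactly file_size bytes and stopping before the MIME boundary. A plausible chunked implementation — an assumption, not the actual poulda helper — would be:

def copy_to_file(input_file, file_size, output, chunk_size=64 * 1024):
    """Copy exactly `file_size` bytes from `input_file` to `output`, in chunks."""
    remaining = file_size
    while remaining > 0:
        chunk = input_file.read(min(chunk_size, remaining))
        if not chunk:
            # The stream ended early; stop rather than loop forever.
            break
        output.write(chunk)
        remaining -= len(chunk)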
View used only when the Nginx Upload Progress support has been disabled. When the Nginx Upload Progress support is enabled, this view is never called at all, since the Nginx module takes care of returning progress information.
def progress(request):
    file_id = request.GET['X-Progress-ID']
    session = DBSession()
    u = session.query(Upload).filter_by(id=file_id).one()
    data = {'state': u.state}
    if u.state == 'uploading':
        if not os.path.exists(u.tmp_path):
            # The temporary file has not been created yet or it has
            # already been renamed. We return 0 in both cases, the
            # front-end code will know what to do.
            received = 0
        else:
            received = os.stat(u.tmp_path).st_size
        data.update({'size': u.size, 'received': received})
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_progress(request):\n filename = request.COOKIES['sessionid']\n tmpfile = os.path.join(tempfile.gettempdir(), filename)\n try:\n f = open(tmpfile, 'r')\n data = f.readlines()\n f.close()\n except Exception, e:\n data = \"{'uploaded': '1', 'total': '1', finished: true}\"\n\n return HttpResponse(data)", "def upload_progress(request):\n progress_id = ''\n if 'X-Progress-ID' in request.GET:\n progress_id = request.GET['X-Progress-ID']\n elif 'X-Progress-ID' in request.META:\n progress_id = request.META['X-Progress-ID']\n if progress_id:\n cache_key = \"%s_%s\" % (request.META['REMOTE_ADDR'], progress_id)\n data = cache.get(cache_key)\n return HttpResponse(simplejson.dumps(data))\n else:\n return HttpResponseServerError('Server Error: You must provide X-Progress-ID header or query param.')", "def upload_progress(request):\n progress_id = None\n if 'X-Progress-ID' in request.GET:\n progress_id = request.GET['X-Progress-ID']\n elif 'X-Progress-ID' in request.META:\n progress_id = request.META['X-Progress-ID']\n if progress_id:\n from django.utils import simplejson\n cache_key = \"%s_%s\" % (request.META['REMOTE_ADDR'], progress_id)\n data = cache.get(cache_key)\n json = simplejson.dumps(data)\n return HttpResponse(json)\n else:\n return HttpResponseBadRequest('Server Error: You must provide X-Progress-ID header or query param.')", "def get_progress(self):\r\n return None", "def upload(request):\n # We pass the 'file_id' in the query string as a GET parameter. If\n # we read it from the POSTed data, WebOb would read all POSTed\n # data, which has various features and traps (like setting the\n # \"Content-Length\" header to 0) that we do not need since we are\n # going to read the data ourselves anyway.\n file_id = request.GET['X-Progress-ID']\n input_file, file_size, filename = get_file_from_request(request)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n upload_dir = request.registry.settings['poulda.upload_dir']\n user_id = authenticated_userid(request)\n # We use a temporary path to detect unfinished uploads (post\n # mortem, not in the application itself).\n path = os.path.join(upload_dir, '_'.join((user_id, file_id)))\n u.tmp_path = path\n u.started = int(time.time())\n u.size = file_size\n u.state = u'uploading'\n session.flush()\n # We need to commit the transaction so that changes to the Upload\n # object can be seen by the other threads (which will serve the\n # 'progress' JSON view called by the upload page).\n transaction.commit()\n with open(path, 'w') as output:\n # We must read only 'file_size' bytes from the 'input_file',\n # not all of it since it also contains the MIME boundary.\n copy_to_file(input_file, file_size, output)\n final_path = filename[1 + filename.rfind(os.sep):]\n final_path = os.path.join(upload_dir, final_path)\n os.rename(path, final_path)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n u.state = u'done'\n u.final_path = unicode(final_path, 'utf-8')\n return HTTPFound(location='success')", "def upload_with_nginx_upload_progress(request):\n input_file, file_size, filename = get_file_from_request(request)\n upload_dir = request.registry.settings['poulda.upload_dir']\n path = os.path.join(upload_dir, filename)\n with open(path, 'w') as output:\n # We must read only 'file_size' bytes from the 'input_file',\n # not all of it since it also contains the MIME boundary.\n copy_to_file(input_file, file_size, output)\n return HTTPFound(location='success')", "def add_progress():\n task_id = 
request.args.get('tid')\n return render_template('progress.html', task_id=task_id) if task_id else redirect('/')", "def upload_progress(self, cloud_file, size, uploaded):", "def get_progress(self):\n return self.cloudserver.progress", "def upload_progress(request, account_slug=None):\n #pylint: disable=unused-argument\n if 'X-Progress-ID' in request.GET:\n progress_id = request.GET['X-Progress-ID']\n elif 'X-Progress-ID' in request.META:\n progress_id = request.META['X-Progress-ID']\n if progress_id:\n cache_key = \"%s_%s\" % (request.META['REMOTE_ADDR'], progress_id)\n data = cache.get(cache_key)\n return HttpResponse(json.dumps(data))", "def getProgress(self):", "def transfer_progress(self, stats):", "def progress(request):\n\n progress_key = json.loads(request.body).get('progress_key')\n\n return {\n 'progress_key': progress_key,\n 'progress': cache.get(progress_key) or 0\n }", "def __show_progress(self, _cur_file_idx, _file_count):\n if (self.__is_show_proegress == False):\n return\n\n if(_file_count == 0):\n raise StandardError('no file found.')\n\n # show progress for each 5% (20 steps)\n digit = math.modf(math.log10(_file_count))[1]\n if(digit < 3):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)\n else:\n digit = digit - 2\n skipstep10 = math.pow(10, digit)\n if ((_cur_file_idx % skipstep10) == 0):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)", "def upload_add_progress(self, nbytes):\n\n self.send_cur_nbytes += nbytes\n if self.send_goal_nbytes != 0:\n self.republish_output()", "def sync_crpp_progress(request):\n\n oErr = ErrHandle()\n data = {'status': 'preparing'}\n\n try:\n # Get the user\n username = request.user.username\n # Get the synchronization type\n get = request.GET\n synctype = \"\"\n if 'synctype' in get:\n synctype = get['synctype']\n\n if synctype == '':\n # Formulate a response\n data['status'] = 'error'\n data['msg'] = \"no sync type specified\" \n\n else:\n # Formulate a response\n data['status'] = 'UNKNOWN'\n\n # Get the appropriate status object\n # sleep(1)\n oStatus = Status.objects.filter(user=username, type=synctype).first()\n\n # Check what we received\n if oStatus == None:\n # There is no status object for this type\n data['status'] = 'error'\n data['msg'] = \"Cannot find status for {}/{}\".format(\n username, synctype)\n else:\n # Get the last status information\n data['status'] = oStatus.status\n data['msg'] = oStatus.msg\n data['count'] = oStatus.count\n\n # Return this response\n return JsonResponse(data)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"sync_crpp_progress error\")\n data = {'status': 'error'}\n\n # Return this response\n return JsonResponse(data)", "def fetch_progress(self):\n threads = len(opts.thread)\n files = len(self.files)\n t_width = len(str(threads))\n f_width = len(str(files))\n\n t_progress = f\"[{self.pos: >{t_width}}/{threads}]\"\n f_progress = f\"[{self.count: >{f_width}}/{files}]\"\n\n if self.count:\n progress = f\"{t_progress} {f_progress}\"\n else:\n progress = t_progress\n\n return progress", "def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % 
status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()", "def reportProgress(self):\n \n pass", "def yt_dlp_progress_hook(self, data: Dict[str, Any]) -> None:\n\n if data[\"status\"] == \"downloading\":\n file_bytes = data.get(\"total_bytes\")\n if file_bytes is None:\n file_bytes = data.get(\"total_bytes_estimate\")\n\n downloaded_bytes = data.get(\"downloaded_bytes\")\n if self.parent.simple_tui and not self.parent.web_ui:\n self.progress = 50\n elif file_bytes and downloaded_bytes:\n self.progress = downloaded_bytes / file_bytes * 50\n\n self.update(\"Downloading\")", "def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)", "def addRenderProgress(call, args=(), kwargs={}, nodeClass='Write'):", "def upload_index(self):\n return self.render(\"cadmin/upload.html\", connector=self.connector)", "def progress(self):\n return self.runProgress", "def progress(self):\n return self.runProgress", "def download_progress(self, cloud_file, size, downloaded):", "def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()", "def GetProgress(self):\n return self.new_progress", "def progress(self) -> JSON:\n return {\n 'up': True,\n 'unindexed_bundles': sum(self.queues[config.notifications_queue_name()].get('messages', {}).values()),\n 'unindexed_documents': sum(chain.from_iterable(\n self.queues[config.tallies_queue_name(retry=retry)].get('messages', {}).values()\n for retry in (False, True)\n ))\n }", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))" ]
[ "0.6554945", "0.64034593", "0.62213355", "0.62133723", "0.6208436", "0.6106646", "0.6060596", "0.5965132", "0.59357363", "0.5908629", "0.5904201", "0.59027714", "0.5886245", "0.5858225", "0.58090687", "0.5805078", "0.573955", "0.5732239", "0.57051885", "0.5704621", "0.5654044", "0.56472486", "0.5594812", "0.5575072", "0.5575072", "0.5535082", "0.5530335", "0.5523983", "0.550419", "0.5503364" ]
0.667452
0
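The progress view above is polled by the upload page and returns either {'state': ...} alone or, while uploading, {'state': 'uploading', 'size': ..., 'received': ...}. A small helper showing how a client could turn that payload into a percentage (field names are taken from the view; the helper itself is illustrative):

def percent_complete(progress):
    """Map the progress payload to a 0-100 integer for a progress bar."""
    state = progress.get('state')
    if state == 'done':
        return 100
    if state != 'uploading':
        return 0
    size = progress.get('size') or 0
    received = progress.get('received') or 0
    return int(100 * received / size) if size else 0


# percent_complete({'state': 'uploading', 'size': 1000, 'received': 250}) -> 25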
Return score for a subj, obj pair of entities.
def get_fact_score(extracted_scores,
                   subj,
                   obj,
                   freq_dict,
                   score_type='FREQ_SCORE'):
    score_types = {'FREQ_SCORE', 'MIN_SCORE'}
    # Min of Page Rank scores of both Entities
    # Upweight facts where both have high scores
    min_score = min(extracted_scores[subj], extracted_scores[obj])
    # Freq Score - If both entities are present - sum of frequencies
    # Upweight facts where both entities are in passage
    if subj in freq_dict and obj in freq_dict:
        freq_score = freq_dict[subj] + freq_dict[obj]
    else:
        freq_score = min(extracted_scores[subj], extracted_scores[obj])
    if score_type == 'FREQ_SCORE':
        return freq_score
    elif score_type == 'MIN_SCORE':
        return min_score
    else:
        raise ValueError(
            'The score_type should be one of: ' + ', '.join(list(score_types)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_score(self, obj):\r\n query = \"\"\"\r\n SELECT SUM(vote), COUNT(vote)\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n AND object_id = %%s\"\"\" % qn(self.model._meta.db_table)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id, obj._get_pk_val()])\r\n result = cursor.fetchall()[0]\r\n # MySQL returns floats and longs respectively for these\r\n # results, so we need to convert them to ints explicitly.\r\n return {\r\n 'score': result[0] and int(result[0]) or 0,\r\n 'num_votes': int(result[1]),\r\n }", "def get_score(self, obj):\n ctype = ContentType.objects.get_for_model(obj)\n result = self.filter(object_id=obj._get_pk_val(),\n content_type=ctype).extra(\n select={\n 'score': 'COALESCE(SUM(vote), 0)',\n 'num_votes': 'COALESCE(COUNT(vote), 0)',\n }).values_list('score', 'num_votes')[0]\n\n return {\n 'score': int(result[0]),\n 'num_votes': int(result[1]),\n }", "def get_score(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n result = self.filter(content_type=content_type,\n object_id=obj._get_pk_val()).aggregate(\n score=Sum('vote'),\n num_votes=Count('vote'))\n #It may happen that there has been no voting on this object so far.\n if result['score'] is None:\n result['score'] = 0\n\n result['upvotes'] = self.get_upvotes(obj)\n result['downvotes'] = self.get_downvotes(obj)\n\n return result", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def score_object(cls, obj: Any) -> float:\n\n if not obj:\n return -1.0\n\n def score(value: Any) -> float:\n if isinstance(value, str):\n return 1.0\n\n if value is not None:\n return 1.5\n\n return 0.0\n\n if is_dataclass(obj):\n return sum(score(getattr(obj, var.name)) for var in fields(obj))\n\n return score(obj)", "def get_score(self, a, b):\n ### FILL IN ###", "def score(self):", "def compute_score(self, gts, res):\n\n assert(gts.keys() == res.keys())\n imgIds = gts.keys()\n\n cider_scorer = CiderScorer(n=self._n, sigma=self._sigma, \n document_frequency=self._doucument_frequency, \n ref_len=self._ref_len)\n\n for id in imgIds:\n hypo = res[id]\n ref = gts[id]\n\n # Sanity check.\n assert(type(hypo) is list)\n assert(len(hypo) == 1)\n assert(type(ref) is list)\n assert(len(ref) > 0)\n\n cider_scorer += (hypo[0], ref)\n\n (score, scores) = cider_scorer.compute_score()\n\n return score, scores", "def getScore(data):\n return score", "def get_score(self):\n return self.score", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score", "def get_score(self, student_answers):\r\n pass", "def __call__(self, json_res):\r\n id2hyps = {\r\n res['clip_id']: [_remove_nonascii(res['descs'][0]['desc'].strip())]\r\n for res in json_res\r\n }\r\n id2hyps = self.tokenizer.tokenize(id2hyps)\r\n assert len(id2hyps) == len(self.id2refs)\r\n\r\n ret_scores = {}\r\n for scorer, method in self.scorers:\r\n print(f\"Computing {method} score...\")\r\n score, scores = scorer.compute_score(self.id2refs, id2hyps)\r\n if isinstance(method, list):\r\n for sc, scs, m in zip(score, scores, method):\r\n ret_scores[m] = sc * 100\r\n 
else:\r\n ret_scores[method] = score * 100\r\n\r\n return ret_scores", "def get_score(self, solution: np.array) -> float:\n pass", "def score(self, X, y):\n ...", "def extract_score(results):\n total_score = 0;\n total_possible_score = 0;\n for k in results.keys():\n total_score = total_score + results[k][0]\n total_possible_score = total_possible_score + results[k][1]\n return (total_score, total_possible_score)", "def get_score(self):\n return tuple(self.score)", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)", "def mt_score_CHILD(signame):\n return ((signame, score(DE, LINCS, signame)))", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)", "def getSubmissionScore(submission):\r\n return submission.score", "def __match_num(self, obj):\n score = 0\n for attr in self.list:\n try:\n if getattr(obj, attr) == getattr(self, attr):\n score += 1\n except AttributeError:\n pass\n return score", "def get_scores(self):\n return self.score", "def get_scores_in_bulk(self, objects):\n object_ids = [o._get_pk_val() for o in objects]\n if not object_ids:\n return {}\n \n ctype = ContentType.objects.get_for_model(objects[0])\n \n if supports_aggregates:\n queryset = self.filter(\n object_id__in = object_ids,\n content_type = ctype,\n ).values(\n 'object_id',\n ).annotate(\n score = CoalesceSum('vote', default='0'),\n num_votes = CoalesceCount('vote', default='0'),\n )\n else:\n queryset = self.filter(\n object_id__in = object_ids,\n content_type = ctype,\n ).extra(\n select = {\n 'score': 'COALESCE(SUM(vote), 0)',\n 'num_votes': 'COALESCE(COUNT(vote), 0)',\n }\n ).values('object_id', 'score', 'num_votes')\n queryset.query.group_by.append('object_id')\n \n vote_dict = {}\n for row in queryset:\n vote_dict[row['object_id']] = {\n 'score': int(row['score']),\n 'num_votes': int(row['num_votes']),\n }\n \n return vote_dict", "def get_scores_in_bulk(self, objects):\r\n vote_dict = {}\r\n if len(objects) > 0:\r\n query = \"\"\"\r\n SELECT object_id, SUM(vote), COUNT(vote)\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n AND object_id IN (%s)\r\n GROUP BY object_id\"\"\" % (\r\n qn(self.model._meta.db_table),\r\n ','.join(['%s'] * len(objects))\r\n )\r\n ctype = ContentType.objects.get_for_model(objects[0])\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id] + [obj._get_pk_val() \\\r\n for obj in objects])\r\n results = cursor.fetchall()\r\n vote_dict = dict([(int(object_id), {\r\n 'score': int(score),\r\n 'num_votes': int(num_votes),\r\n }) for object_id, score, num_votes in results])\r\n return vote_dict", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score" ]
[ "0.63347", "0.6332622", "0.61146146", "0.60328287", "0.5940963", "0.5918125", "0.58817494", "0.5824574", "0.58078414", "0.58037364", "0.578324", "0.57741785", "0.5726161", "0.5708169", "0.57081425", "0.57005334", "0.56996214", "0.5697008", "0.56950974", "0.56923544", "0.5598369", "0.5596674", "0.55864066", "0.55787545", "0.55498946", "0.55348307", "0.55241054", "0.55184644", "0.55184644", "0.55184644" ]
0.6586029
0
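A worked toy example of the two scoring modes described above: the frequency score applies when both entities occur in the passage, otherwise it falls back to the minimum of the extracted (page-rank-style) scores, which is also what MIN_SCORE returns. Entity names and numbers are made up for illustration:

extracted_scores = {'paris': 0.9, 'france': 0.4}
freq_dict = {'paris': 3, 'france': 1}

# MIN_SCORE: minimum of the two extracted scores.
min_score = min(extracted_scores['paris'], extracted_scores['france'])   # 0.4

# FREQ_SCORE: both entities are in the passage, so sum their frequencies.
freq_score = freq_dict['paris'] + freq_dict['france']                    # 4

# FREQ_SCORE fallback: if either entity is missing from the passage,
# use the minimum of the extracted scores instead.
freq_dict_partial = {'paris': 3}
fallback_score = min(extracted_scores['paris'], extracted_scores['france'])  # 0.4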
barcode_filename should throw when the sequence is wrong. A ValueError should be raised if the last digit of the sequence (sequence % 10) is not the same as the ISO weekday number (strftime %u).
def test_wrong_sequence(self):
    date = datetime(2016, 11, 12)
    seq = 31
    with self.assertRaises(ValueError):
        star_barcode.barcode_filename(date, seq)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordinary(self):\n date = datetime(2016, 11, 12)\n seq = 36\n name = star_barcode.barcode_filename(date, seq)\n self.assertEqual(\n name,\n 'Barcode_2016-W45-6_36.pdf'\n )", "def test_year_boundary(self):\n date = datetime(2017, 1, 1)\n seq = 27\n name = star_barcode.barcode_filename(date, seq)\n self.assertEqual(\n name,\n 'Barcode_2016-W52-7_27.pdf'\n )", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def test_week_wrong(self):\n weeks = [0, 54]\n for week in weeks:\n with self.subTest(week=week):\n with self.assertRaisesRegex(ValueError, str(week)):\n star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=21,\n header_line=''\n )", "def checkBarcode(barcode):\r\n barcode = barcode.strip()\r\n if validateFormat(barcode) is False:\r\n return 'barcode not valid'\r\n else:\r\n barcode = barcode.replace('-','')\r\n if len(barcode) == 12:\r\n fullbarcode = barcode + str(findlastdigit(barcode))\r\n return fullbarcode\r\n elif len(barcode) == 13:\r\n if findlastdigit(barcode) == int(barcode[-1]):\r\n return 'Valid'\r\n else:\r\n return 'Invalid'", "def test_sequence_outside_range(self):\n seqs = [-1, 100]\n for seq in seqs:\n with self.subTest(seq=seq):\n with self.assertRaisesRegex(ValueError, str(seq)):\n star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=46,\n header_line=''\n )", "def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )", "def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False", "def test_generate_barcode_ean8(self):\n pass", "def test_buildPDFRejectsInvalidBookFilename(self):\n builder = BookBuilder()\n self.assertRaises(\n ValueError,\n builder.buildPDF,\n FilePath(self.mktemp()).child(\"foo\"),\n None,\n None)", "def test_generate_barcode_ean13(self):\n pass", "def test_standard_seq_week(self):\n date = datetime(2016, 11, 15)\n prices = [2] * 7\n expected_sequence = 22\n expected_week = 46\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def test_sequence_0_to_9(self):\n seqs = list(range(10))\n for seq in seqs:\n with self.subTest(seq=seq):\n result = star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=20,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02}'),\n -1\n )", "def test_ascii_increment(self):\r\n self.assertRaises(ValueError, convert_fastaqual, self.fasta_file_path,\r\n 
ascii_increment=140, output_directory=self.output_dir)", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def test_exception_case(file_with_exception_value):\n with pytest.raises(ValueError, match=\"It is not a magic number!\"):\n read_magic_number(file_with_exception_value)", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def test_save_hex_path_not_to_hex_file():\n with pytest.raises(ValueError) as ex:\n uflash.save_hex('foo', '')\n assert ex.value.args[0] == 'The path to flash must be for a .hex file.'", "def format_filename(prefix, suffix, seq_len, uncased):\n seq_str = \"seq-{}\".format(seq_len)\n if uncased:\n case_str = \"uncased\"\n else:\n case_str = \"cased\"\n\n file_name = \"{}.{}.{}.{}\".format(prefix, seq_str, case_str, suffix)\n\n return file_name", "def check_sequence_name_format(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n progress_controller.maximum = 2\n\n # do not consider referenced shot nodes\n shots = pm.ls(type=\"shot\")\n shot = None\n for s in shots:\n if s.referenceFile() is None:\n shot = s\n break\n\n sequencer = shot.outputs(type=\"sequencer\")[0]\n\n # get current task\n from anima.dcc import mayaEnv\n\n m = mayaEnv.Maya()\n v = m.get_current_version()\n task = v.task\n\n # get sequence and scene names\n sequence_name = get_seq_name_from_task(task)\n scene_name = get_scene_name_from_task(task)\n\n progress_controller.increment()\n # set sequencer name as seq_name + sc_name\n name = \"%s_%s\" % (sequence_name, scene_name)\n\n if sequencer.get_sequence_name() != name:\n progress_controller.complete()\n raise PublishError(\n \"Sequence name format is not correct!!!<br>\"\n \"<br>\"\n \"It should have been:<br>\"\n \"<br>\"\n \"%s<br>\"\n \"<br>\"\n \"But found:<br>\"\n \"%s\" % (name, sequencer.get_sequence_name())\n )\n\n progress_controller.complete()", "def test_ascii_increment(self):\r\n self.assertRaises(ValueError, convert_fastq, self.fasta_file_path,\r\n self.qual_file_path, ascii_increment=140, output_directory=self.output_dir)\r\n self.assertRaises(ValueError, convert_fastq, self.fasta_file_path,\r\n self.qual_file_path, ascii_increment=10, output_directory=\r\n self.output_dir)", "def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )", "def test_generate_barcode_upce(self):\n pass", "def test_example_day9_pt1():\n assert find_first_invalid_value(ex_data, 5) == 127", "def test_gregorian_mismatch(self):\n date = datetime(2017, 1, 1)\n prices = [2] * 7\n expected_sequence = 27\n expected_week = 52\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def findFileName(path, slug):\n\tfor attempt in range(0, 99):\n\t\tfile_name = makeFileName(path, slug, attempt)\n\t\tif not os.path.exists(file_name):\n\t\t\treturn file_name\n\n\tprint \"ERROR: Too many clashes trying to create filename \" + makeFileName(path, slug)\n\texit()", "def test_invalid_file_type(barred_tac_list_importer):\n expect_failure(barred_tac_list_importer, exc_message='Wrong suffix')", "def test_create_timestamped_filename(self):\n precision = None\n n_digits = 29\n name_base = 'test_string'\n 
name_extension = 'wie'\n tsfn = kn.create_timestamped_filename(name_base, name_extension, precision, n_digits)\n self.assertEqual(name_base, tsfn[0:11], msg='prefix name exception')\n n_chars = len(tsfn)\n self.assertEqual(name_extension, tsfn[n_chars-3:n_chars], msg='extension name exception')\n\n precision = 1e-15\n tsfn = kn.create_timestamped_filename(name_base, name_extension, precision, n_digits)\n self.assertEqual(name_base, tsfn[0:11], msg='prefix name exception')\n n_chars = len(tsfn)\n self.assertEqual(name_extension, tsfn[n_chars-3:n_chars], msg='extension name exception')", "def _get_seq_filename(self):\n fnd = self._get_session_dir()\n self.seq_number += 1\n fn = os.path.join(fnd, 'S%4.4d.tif' % self.seq_number)\n return fn", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False" ]
[ "0.6670714", "0.6530107", "0.5832887", "0.58181393", "0.5765814", "0.57609874", "0.5743387", "0.5700269", "0.56717837", "0.5667678", "0.56279606", "0.5543977", "0.53887117", "0.5270413", "0.5240893", "0.5214146", "0.52075595", "0.520386", "0.51060236", "0.5103093", "0.50870526", "0.5086315", "0.50763834", "0.50544566", "0.5041361", "0.5019571", "0.50078374", "0.4994067", "0.49869856", "0.49664298" ]
0.77777255
0
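The implementation under test is not part of these records, but the barcode tests here and in the next record pin down its behaviour: the filename is built from the ISO week-date, and the last digit of the sequence must equal the ISO weekday. A sketch consistent with those tests — an inference, not the actual star_barcode code:

from datetime import datetime


def barcode_filename(date, sequence):
    """Inferred sketch: 'Barcode_<ISO year>-W<ISO week>-<ISO weekday>_<sequence>.pdf'."""
    iso_year, iso_week, iso_weekday = date.isocalendar()
    if sequence % 10 != iso_weekday:
        raise ValueError(
            f'Sequence {sequence} does not end in ISO weekday {iso_weekday}')
    return f'Barcode_{iso_year}-W{iso_week:02}-{iso_weekday}_{sequence:02}.pdf'


# barcode_filename(datetime(2016, 11, 12), 36) -> 'Barcode_2016-W45-6_36.pdf'
# barcode_filename(datetime(2016, 11, 12), 31) -> raises ValueError (31 % 10 != 6)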
barcode_filename should use the ISO week-year, not the standard calendar year. January 1 2017 is the final ISO day (Sunday) of the final ISO week of 2016, so the filename should be built with the 2016 week-year.
def test_year_boundary(self):
    date = datetime(2017, 1, 1)
    seq = 27
    name = star_barcode.barcode_filename(date, seq)
    self.assertEqual(
        name,
        'Barcode_2016-W52-7_27.pdf'
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordinary(self):\n date = datetime(2016, 11, 12)\n seq = 36\n name = star_barcode.barcode_filename(date, seq)\n self.assertEqual(\n name,\n 'Barcode_2016-W45-6_36.pdf'\n )", "def get_year(self, filename):\n year = self.file.replace('s24_', '').replace('.vrt', '')\n self.logger.info(f'This .vrt file contains data for the year {year}')\n return year", "def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )", "def xlDateISO(xdate):\n # QuantLib doesn't support dates prior to 1901\n # which saves us from dealing with the leap year problem\n if xdate < 367:\n return \"#Date prior to 1901-01-01\"\n \n # python dates are from year zero, excel from 1900\n return date.fromordinal(693594 + int(xdate)).isoformat()", "def convert_date(year: str, week: str):\n date = datetime.fromisocalendar(int(year), int(week), 1)\n return date.strftime(\"%m/%d/%YZ\")", "def date_to_filename(base_path, raw_date_string):\n raw_date_string = raw_date_string[:-1]\n month, day, year = raw_date_string.split(\"/\")\n relative_path = \"{}/{}/{}.md\".format(year, month, day)\n return base_path / relative_path", "def to_filetag(self) -> str:\n return self.strftime(f\"{self.FormatCode.YEAR.WITH_CENTURY}{self.FormatCode.MONTH.NUM}{self.FormatCode.DAY.NUM}\")", "def normalizeFilenameToCommonDateFormat(filename):\n rgx_date = re.search(r'(\\d+)-(\\d+)-(\\d+)', filename)\n\n if (rgx_date == None):\n raise ValueError(\"Not interested in this file!\")\n \n year = rgx_date.group(1)\n month = rgx_date.group(2)\n day = rgx_date.group(3)\n\n return \"%s%s%s.pdf\" % (year, month, day)", "def filename2date(filename):\r\n # Find the '-SC' in the filename.\r\n dash = filename.find('-SC')\r\n if dash:\r\n return datetime.datetime.strptime(filename[dash-7:dash], '%Y%j')\r\n else:\r\n raise ValueError('Landsat filename does not conform to expected format.')", "def get_archive_filename():\r\n today = datetime.date.today()\r\n return str(today)", "def get_dih_filename(year):\r\n return 'DaysInHospital_Y%d.csv' % year", "def test_gregorian_mismatch(self):\n date = datetime(2017, 1, 1)\n prices = [2] * 7\n expected_sequence = 27\n expected_week = 52\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def test_wrong_sequence(self):\n date = datetime(2016, 11, 12)\n seq = 31\n with self.assertRaises(ValueError):\n star_barcode.barcode_filename(date, seq)", "def make_year(res):\n return str(res['issued']['date-parts'][0][0])", "def generate_raw_filename(self, source_name, table_name, environment, seq_number, upload_time, load_type,\n file_format):\n file_date = upload_time.strftime(\n \"%Y-%m-%d-%H-%M-%S-%f\")[:-3] # [:-3] => Removing the 3 last characters as %f is for millis.\n res = f'{source_name}/{source_name}_{table_name}/' \\\n f'{source_name}_{environment}_{table_name}_{str(seq_number).zfill(3)}_' \\\n f'{file_date}_utc_{load_type}.{file_format}'\n res = res.lower()\n\n # Check if no illegal chars were passed\n #test = FileNameStandardConvention(res)\n #test.check_naming_convention()\n return res", "def create_filenames(date):\n \n filelist = list()\n for t in [\"0000\", \"0600\", \"1200\", \"1800\"]:\n for t2 in [\"000\", \"003\"]:\n 
filelist.append(\"gfsanl_4_\"+d.strftime('%Y%m%d')+\"_\"+t+\"_\"+t2+\".grb2\")\n return filelist", "def test_file_name_to_date_zipped(self):\n\n self.assertTrue(\n satellite_io.file_name_to_date(FILE_NAME_ZIPPED) ==\n VALID_DATE_STRING\n )", "def test_file_name_to_date_unzipped(self):\n\n self.assertTrue(\n satellite_io.file_name_to_date(FILE_NAME_ZIPPED) ==\n VALID_DATE_STRING\n )", "def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def year(cls, year: typing.Union[int, str])->str:\n yearstr: str\n if isinstance(year, int):\n yearstr = str(year)\n else:\n yearstr = year\n return cls.DATE_AND_TIMES_SIGIL + yearstr + \"-01-01T00:00:00/9\"", "def copyrightRecord(inputstring):\n \n return inputstring[19:24]", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def test_standard_seq_week(self):\n date = datetime(2016, 11, 15)\n prices = [2] * 7\n expected_sequence = 22\n expected_week = 46\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def _retrosheet_filename(game_id, data_root):\n # game id is TTTYYYYMMDDN.\n team = game_id[:3]\n year = game_id[3:7]\n file_pattern = year + team + \".EV*\"\n file_path = os.path.join(data_root, \"retrosheet\", year, file_pattern)\n file_matches = glob.glob(file_path)\n return file_matches[0] if len(file_matches) else None", "def get_file_date(self, file: str) -> date:", "def isoformat(self):\n s = '{0:04}'.format(self._year)\n if self._month:\n s += '-{0:02}'.format(self._month)\n if self._day:\n s += '-{0:02}'.format(self._day)\n return s", "def get_date(self,yearlimits=[1500,2020]):\n\t\thead = self.raw_text()[:300] \t \t \n\t\tparser = Regexdate(head) \t \t\t\n\t\tyear = parser.find_year(yearlimits)\t\t\n\t\tmonth = parser.find_month()\n\t\tday = parser.find_day()\n\t\tif day and year != \"\":\n\t\t\treturn year + \"-\" + month + \"-\" + day\t\n\t\tif year:\n\t\t\treturn year\n\t\treturn \"\"", "def _four_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return str(year)", "def test_convert_date_to_year(self):\n # TODO there might be a more robust way to write this with try except statements.", "def __parseDailyFilename(self, f):\n base = os.path.basename(f)\n\n tokens = base.split('.')\n if len(tokens) < 6:\n # assume it's an old file in the format A2000089etcetc.tif i.e. ?YYYYDDD*\n yr = base[1:5]\n day = base[5:8]\n else:\n # assume it's a file in the newer format ?*.YYYY.DDD.etc format\n varname, yr, day, temporalSummary, res, spatialSummary = tokens[0:6]\n outTemplate = varname + \"{}.{}.{}.\" + \"{}.{}.{}.tif\".format(temporalSummary, res, spatialSummary)\n if self._outTemplate == \"FILLED-OUTPUT{}.{}.{}.TemporalSummary.Res.SpatialSummary.tif\":\n self._outTemplate = outTemplate\n else:\n assert self._outTemplate == outTemplate\n return day, yr" ]
[ "0.6433659", "0.5829394", "0.5579298", "0.5543711", "0.5455112", "0.54528177", "0.544621", "0.5437168", "0.53968394", "0.53321236", "0.5271344", "0.52466387", "0.5219768", "0.5202958", "0.51598704", "0.5142467", "0.5128275", "0.51264524", "0.51088095", "0.509985", "0.5091835", "0.5085563", "0.50736886", "0.50370467", "0.5032394", "0.5020506", "0.5015143", "0.49998966", "0.49967915", "0.49928886" ]
0.73840857
0
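The year-boundary behaviour tested above follows directly from the ISO calendar: 1 January 2017 is a Sunday and still belongs to the last ISO week of 2016, so the ISO week-year (2016) differs from the Gregorian year (2017). This is easy to confirm with the standard library:

from datetime import datetime

iso_year, iso_week, iso_weekday = datetime(2017, 1, 1).isocalendar()
print(iso_year, iso_week, iso_weekday)    # 2016 52 7  (Sunday of 2016's final ISO week)

iso_year, iso_week, iso_weekday = datetime(2016, 11, 12).isocalendar()
print(iso_year, iso_week, iso_weekday)    # 2016 45 6  (the ordinary case in the negatives)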
date_to_sequence_and_week handles a typical case. Given a datetime object for 2016-11-15 (a Tuesday) and a 7-long list of the integer 2, the sequence returned should be 22 and the week 46.
def test_standard_seq_week(self):
    date = datetime(2016, 11, 15)
    prices = [2] * 7
    expected_sequence = 22
    expected_week = 46
    self.assertEqual(
        star_barcode.date_to_sequence_and_week(
            date=date, price_codes=prices),
        (expected_sequence, expected_week)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def days_to_weeks(list_of_days):\n all_weeks = []\n for day in list_of_days:\n that_week = day.isocalendar()\n if (\n len(all_weeks) == 0\n or all_weeks[-1].year != that_week.year\n or all_weeks[-1].week != that_week.week\n ):\n all_weeks.append(that_week)\n return list(map(lambda iso: \"{}-{}\".format(iso.year, iso.week), all_weeks))", "def week_range(date):\n # isocalendar calculates the year, week of the year, and day of the week.\n # dow is Mon = 1, Sat = 6, Sun = 7\n year, week, dow = date.isocalendar()\n\n # Find the first day of the week.\n if dow == 7:\n # Since we want to start with Sunday, let's test for that condition.\n start_date = date\n else:\n # Otherwise, subtract `dow` number days to get the first day\n start_date = date - timedelta(dow)\n\n return start_date, start_date + timedelta(6)", "def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]", "def get_next_week(self, startdate):\n dow_today = int(datetime.datetime.strftime(startdate, '%w'))\n days_until_sunday = 7 - ((dow_today + 7) % 7)\n #days_until_sunday = 7 - (dow_today + 1)\n sunday = startdate + datetime.timedelta(days=days_until_sunday)\n following_saturday = sunday + datetime.timedelta(days=6)\n next_week = (sunday, following_saturday)\n return next_week", "def dateTimetoWeekend(dateList, startYear):\n weekHash = dateList.dt.week + 52 * (dateList.dt.year - startYear)\n return weekHash", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day", "def get_week(date):\n\n # TODO: the API seems broken. 
It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 (Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. 
You supplied {return_type}\")", "def get_week_from_date(date) -> int:\n month, year = date.month, date.year\n if month < 4:\n year -= 1\n ld = _labor_day(year)\n wk1_wed = ld + timedelta(days=2)\n days_since = (date - wk1_wed).days\n weeks_since = days_since / 7.\n week = math.floor(weeks_since) + 1\n return int(week)", "def generate_seven_days(start_date):\n\n seven_days = []\n\n for i in xrange(7):\n days_to_add = datetime.timedelta(days=i)\n seven_days.append([start_date + days_to_add, 0])\n\n return seven_days", "def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)", "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def _next_week(self) -> datetime.datetime:\n now = datetime.datetime.now()\n for i in range(7):\n yield now + datetime.timedelta(i)", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def dates_of_the_week():\n date_list = list()\n now = datetime.datetime.now()\n monday = now - datetime.timedelta(days=now.weekday(), hours=now.hour, minutes=now.minute, seconds=now.second,\n microseconds=now.microsecond)\n date_list.append(monday)\n for each in range(1, 6):\n monday = monday + datetime.timedelta(days=1)\n date_list.append(monday)\n date_list.append((monday + datetime.timedelta(days=1, hours=23, minutes=59, seconds=59)))\n return date_list", "def weekly():", "def test_week_in_range(self):\n weeks = list(range(1, 54))\n seq = 21\n for week in weeks:\n with self.subTest(week=week):\n result = star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02} {week:02}'),\n -1\n )", "def _create_week_dates_text(self):\n week_start = []\n week_end = []\n week_text = []\n week_start.append(self.start_date)\n week_end.append(self.start_date + timedelta(days=6))\n week_start.append(week_end[0] + timedelta(days=1))\n week_end.append(self.display_end_date)\n for i in (0,1):\n week_start_month = week_start[i].strftime(\"%b\")\n week_start_day = week_start[i].strftime(\"%d\").lstrip(\"0\")\n week_end_month = week_end[i].strftime(\"%b\")\n week_end_day = week_end[i].strftime(\"%d\").lstrip(\"0\")\n week_text.append(\"%s %s - %s %s\" %(week_start_month, \n week_start_day, week_end_month, 
week_end_day))\n return week_text", "def record_weeks(self, user, start, end, num=10):\n query = self.user_weeks_between(user, start, end).order_by('-plays')[:num]\n for week in query:\n date = ldates.date_of_index(week.week_idx)\n yield week, date", "def dayweek_clean(fecha):\n\n try:\n lista = fecha.split(sep = '/')\n fecha = '-'.join(reversed(lista))\n temp = pd.Timestamp(fecha)\n dia_semana = (temp.dayofweek, temp.day_name())\n return dia_semana[1]\n \n except:\n #print ('hola')\n return None", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def WEEKDAY(date, return_type=1):\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n return (_make_datetime(date).weekday() - first) % 7 + index", "def test_weeks(self):\n d = datetime(2014, 1, 29)\n eq_(week_start(d), datetime(2014, 1, 27, 0, 0, 0))\n eq_(week_end(d), datetime(2014, 2, 2, 23, 59, 59))", "def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum" ]
[ "0.61387986", "0.60740703", "0.6026458", "0.5912314", "0.59121364", "0.58824337", "0.5854664", "0.58490837", "0.5826529", "0.58078784", "0.5777913", "0.5774713", "0.57356215", "0.5734437", "0.57301974", "0.5707575", "0.5675245", "0.56201994", "0.54466105", "0.5442153", "0.54252625", "0.540921", "0.53988665", "0.5365329", "0.5346192", "0.5339034", "0.5332185", "0.5330321", "0.5327308", "0.53253776" ]
0.657142
0
date_to_sequence_and_week handles a short price code list: Given a datetime object for 2016-11-15 (a Tuesday) and a 2-long list of the integer 2, the sequence returned should be 22 and the week 46.
def test_short_prices(self): date = datetime(2016, 11, 15) prices = [2] * 2 expected_sequence = 22 expected_week = 46 self.assertEqual( star_barcode.date_to_sequence_and_week( date=date, price_codes=prices), (expected_sequence, expected_week) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_standard_seq_week(self):\n date = datetime(2016, 11, 15)\n prices = [2] * 7\n expected_sequence = 22\n expected_week = 46\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def dateTimetoWeekend(dateList, startYear):\n weekHash = dateList.dt.week + 52 * (dateList.dt.year - startYear)\n return weekHash", "def days_to_weeks(list_of_days):\n all_weeks = []\n for day in list_of_days:\n that_week = day.isocalendar()\n if (\n len(all_weeks) == 0\n or all_weeks[-1].year != that_week.year\n or all_weeks[-1].week != that_week.week\n ):\n all_weeks.append(that_week)\n return list(map(lambda iso: \"{}-{}\".format(iso.year, iso.week), all_weeks))", "def generate_seven_days(start_date):\n\n seven_days = []\n\n for i in xrange(7):\n days_to_add = datetime.timedelta(days=i)\n seven_days.append([start_date + days_to_add, 0])\n\n return seven_days", "def week_range(date):\n # isocalendar calculates the year, week of the year, and day of the week.\n # dow is Mon = 1, Sat = 6, Sun = 7\n year, week, dow = date.isocalendar()\n\n # Find the first day of the week.\n if dow == 7:\n # Since we want to start with Sunday, let's test for that condition.\n start_date = date\n else:\n # Otherwise, subtract `dow` number days to get the first day\n start_date = date - timedelta(dow)\n\n return start_date, start_date + timedelta(6)", "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 (Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. 
You supplied {return_type}\")", "def weekly():", "def get_next_week(self, startdate):\n dow_today = int(datetime.datetime.strftime(startdate, '%w'))\n days_until_sunday = 7 - ((dow_today + 7) % 7)\n #days_until_sunday = 7 - (dow_today + 1)\n sunday = startdate + datetime.timedelta(days=days_until_sunday)\n following_saturday = sunday + datetime.timedelta(days=6)\n next_week = (sunday, following_saturday)\n return next_week", "def datetime_demo():\n day = pd.to_datetime('20170123')\n\n df1 = pd.DataFrame({'day': ['20170101','20170102'] , 'demand': [ 100 , 120] })\n\n df1['day'] = df1['day'].apply(lambda x : pd.to_datetime(x))\n # df1['day'] = pd.to_datetime(df1['day']) is also right.\n\n\n # date_range\n pd.date_range(start = '20170101' ,end = '20170201' )\n pd.date_range(start = '20170101' ,periods=10)\n pd.date_range(start = '20170101' ,periods=10, freq='W') # ten week\n pd.date_range(start = '20170101' ,periods=10, freq='M') # ten monthes\n pd.date_range(start = '20170101' ,periods=10, freq='MS') # ten monthes\n pd.date_range(start = '20170102' ,periods=10, freq='MS') # ten monthes MS: month start.\n \"\"\"\n\n 所有的 freq string 会被转成 DateOffset subclass,之后再执行\n Alias\tDescription\n B\tbusiness day frequency\n C\tcustom business day frequency\n D\tcalendar day frequency\n W\tweekly frequency\n M\tmonth end frequency\n SM\tsemi-month end frequency (15th and end of month)\n BM\tbusiness month end frequency\n CBM\tcustom business month end frequency\n MS\tmonth start frequency\n SMS\tsemi-month start frequency (1st and 15th)\n BMS\tbusiness month start frequency\n CBMS\tcustom business month start frequency\n Q\tquarter end frequency\n BQ\tbusiness quarter end frequency\n QS\tquarter start frequency\n BQS\tbusiness quarter start frequency\n A, Y\tyear end frequency\n BA, BY\tbusiness year end frequency\n AS, YS\tyear start frequency\n BAS, BYS\tbusiness year start frequency\n BH\tbusiness hour frequency\n H\thourly frequency\n T, min\tminutely frequency\n S\tsecondly frequency\n L, ms\tmilliseconds\n U, us\tmicroseconds\n N\tnanoseconds\n \"\"\"\n\n\n # DateOffset\n\n d = datetime(2008, 8, 18, 9, 0)\n d + relativedelta(months=4, days=5)\n\n\n from pandas.tseries.offsets import *\n\n\n d + DateOffset(months=4, days=5)\n\n\n offset = BMonthEnd()\n offset.rollback(d)\n offset.rollforward(d)\n\n\n\n\n\n\n\n\n\n pass", "def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]", "def test_week_in_range(self):\n weeks = list(range(1, 54))\n seq = 21\n for week in weeks:\n with self.subTest(week=week):\n result = star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02} {week:02}'),\n -1\n )", "def _create_week_dates_text(self):\n week_start = []\n week_end = []\n week_text = []\n week_start.append(self.start_date)\n week_end.append(self.start_date + timedelta(days=6))\n week_start.append(week_end[0] + timedelta(days=1))\n week_end.append(self.display_end_date)\n for i in (0,1):\n week_start_month = week_start[i].strftime(\"%b\")\n week_start_day = week_start[i].strftime(\"%d\").lstrip(\"0\")\n week_end_month = week_end[i].strftime(\"%b\")\n week_end_day = week_end[i].strftime(\"%d\").lstrip(\"0\")\n week_text.append(\"%s %s - %s %s\" %(week_start_month, \n week_start_day, week_end_month, week_end_day))\n return week_text", "def sacred_wednesdays_in_range(range):\n a = range[0]\n b = 
range[1]\n wed = DayOfWeek.Wednesday.on_or_after(a)\n h_date = HinduLunarDate.from_fixed(wed)\n ell = [wed] if (h_date.day == 8) else []\n if is_in_range(wed, range):\n ell[:0] = sacred_wednesdays_in_range([wed + 1, b])\n return ell\n else:\n return []", "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def DateFormated(x):\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def test_gregorian_mismatch(self):\n date = datetime(2017, 1, 1)\n prices = [2] * 7\n expected_sequence = 27\n expected_week = 52\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def _next_week(self) -> datetime.datetime:\n now = datetime.datetime.now()\n for i in range(7):\n yield now + datetime.timedelta(i)", "def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day", "def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1", "def doomsday(y):", "def construct_date_list(start=None, num=1):\n if not start:\n start = time.strftime(\"%Y%m%d\", time.gmtime(time.time() - num * 60 * 60 * 24))\n\n elif len(start) != 8:\n raise Exception(\"Date is not in expected format!\")\n\n startdatetime = datetime.datetime.strptime(start, '%Y%m%d')\n\n datelist = [startdatetime + datetime.timedelta(days=i) for i in range(0, num)]\n return [date.strftime('%Y%m%d') for date in datelist]", "def calculate_return_day(yyyymmdd, hhmmss, week):\n return_day_number = datetime(yyyymmdd[0], yyyymmdd[1], yyyymmdd[2]).weekday()\n return_day_name = week[return_day_number]\n return_day_hour = hhmmss[0]\n return [return_day_number, return_day_name, return_day_hour]", "def next_date(date):\n #For this function, I just created as many if else statements as I could to cover every situation I could think of.\n #Most of these if else statements are distinct edge cases where I add 1 in a different spot each time.\n if date[0] == 1 or date[0] == 3 or date[0] == 5 or date[0] == 7 or date[0] == 8 or date[0] == 10:\n if 
date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 12:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (1, 1, date[2] + 1)\n return nextday\n elif date[0] == 4 or date[0] == 6 or date[0] == 9 or date[0] == 11:\n if date[1] < 30:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 30:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 2:\n if date[2] % 4 == 0 or date[2] % 1000 == 0:\n if date[1] < 29:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 29:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[1] < 28:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 28:\n nextday = (date[0] + 1, 1, date[2])\n return nextday", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def test_jd2dow():\n\tjd = [2434923.5,2458130.5]\n\tdnum_true = [3,5]\n\tdnam_true = np.array(['Wed','Fri'],dtype='|S3')\n\tdnum_test, dnam_test = date_functions.jd2dow( jd )\n\t\n\tassert dnum_test[0] == dnum_true[0]\n\tassert dnum_test[1] == dnum_true[1]\n\tassert dnam_test[0] == dnam_true[0]\n\tassert dnam_test[1] == dnam_true[1]", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def yearMonSeqGenerator(startDate='1970-01-01',endDate=pd.to_datetime('today')):\n dtRng=pd.date_range(startDate,endDate,freq='MS')\n return pd.DataFrame(\n {\"YearMonStr\":dtRng.strftime(\"%Y%b\").tolist(),\n \"YearMonNum\":dtRng.strftime(\"%Y%m\").tolist()})" ]
[ "0.67803526", "0.59415525", "0.55468106", "0.5470812", "0.54572535", "0.5452534", "0.5385184", "0.5376735", "0.53491974", "0.52875465", "0.5275247", "0.5237731", "0.5220215", "0.5215518", "0.5195454", "0.5180022", "0.51517755", "0.514522", "0.51418126", "0.5131633", "0.5122742", "0.5112691", "0.5104034", "0.51006263", "0.5094289", "0.50774395", "0.5039316", "0.50337476", "0.503135", "0.500836" ]
0.66608876
1
construct_postscript raises ValueError if BWIPP is missing: If the location passed to construct_postscript for the BWIPP PostScript library does not exist, then it should raise ValueError.
def test_missing_bwipp(self): seq = 21 week = 46 header = 'MSTAR 2016-11-14 MON 1.0' with self.assertRaisesRegex(ValueError, 'BWIPP'): star_barcode.construct_postscript( bwipp_location=Path('/fake-path/not-here.ps'), issn=self.issn, sequence=seq, week=week, header_line=header )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_typical(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n issn_args = ' '.join([self.issn, str(seq), str(week)])\n result = star_barcode.construct_postscript(\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )\n self.assertGreater(result.find(str(self.bwipp)), -1)\n self.assertGreater(result.find(issn_args), -1)\n self.assertGreater(result.find(header), -1)", "def isPostscript(fmt):\n if fmt == 'POST' or fmt == 'PSCL' or fmt == 'PDF':\n return 1\n return 0", "def test_getLocationFromPostcode3(self):\n \n pstPrc=PostcodeProcessor()\n try:\n _coords=pstPrc.getLocationFromPostcode(self.postcode3)\n self.assertTrue(0,'Bad coordinate should not return postcode.')\n except:\n self.assertRaises(HTTPError)", "def test_getLocationFromPostcodeKML3(self):\n \n pstPrc=PostcodeProcessor()\n try:\n _coords=pstPrc.getLocationFromPostcodeKML(self.postcode3)\n self.assertTrue(0,'Bad coordinate should not return postcode.')\n except:\n self.assertRaises(HTTPError)", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def create_rib_vertical_post(component, comp_occurrence, wing_body, rib_body, rib_post_loc, rib_post_width,\n rib_post_triangle_len):\n # log('creating rib post of width', rib_post_width, ' at ', rib_post_loc)\n # create 2 planes, rib_post_width apart, centered on rib_post_loc\n\n p1loc = rib_post_loc - (rib_post_width / 2)\n p2loc = rib_post_loc + (rib_post_width / 2)\n planes = component.constructionPlanes\n\n plane1_input = planes.createInput()\n plane1_input.setByOffset(vert_spanwise_plane(component), ValueInput.createByReal(p1loc))\n plane1 = planes.add(plane1_input)\n\n plane2_input = planes.createInput()\n plane2_input.setByOffset(vert_spanwise_plane(component), ValueInput.createByReal(p2loc))\n plane2 = planes.add(plane2_input)\n\n post = boundary_fill_between_planes(component, comp_occurrence, rib_body, plane1, plane2)\n\n # hide the construction planes\n plane1.isLightBulbOn = False\n plane2.isLightBulbOn = False\n\n # get dimensions of post\n bounding_box = post.boundingBox\n top = project_coord(bounding_box.maxPoint.asArray(), VERTICAL_UP_DIRECTION.asArray())\n bottom = project_coord(bounding_box.minPoint.asArray(), VERTICAL_UP_DIRECTION.asArray())\n spanwise_mid = project_coord(centroid_of_bounding_box(bounding_box).asArray(), SPANWISE_DIRECTION.asArray())\n\n assert plane1.isValid\n\n sketch = component.sketches.add(plane2, comp_occurrence)\n lines = sketch.sketchCurves.sketchLines\n\n tri_side = rib_post_triangle_len\n\n # only create triangles if the section is tall enough\n if top - bottom > 2 * tri_side:\n p1 = sketch.modelToSketchSpace(point(chordwise=p1loc, spanwise=spanwise_mid, vertical=top - tri_side))\n p2 = sketch.modelToSketchSpace(point(chordwise=p1loc, spanwise=spanwise_mid + tri_side, vertical=top))\n p3 = sketch.modelToSketchSpace(point(chordwise=p1loc, spanwise=spanwise_mid - tri_side, vertical=top))\n\n lines.addByTwoPoints(p1, p2)\n lines.addByTwoPoints(p2, p3)\n lines.addByTwoPoints(p3, p1)\n\n p1 = sketch.modelToSketchSpace(point(chordwise=p1loc, spanwise=spanwise_mid, vertical=bottom + tri_side))\n p2 = sketch.modelToSketchSpace(point(chordwise=p1loc, spanwise=spanwise_mid + tri_side, vertical=bottom))\n p3 = 
sketch.modelToSketchSpace(point(chordwise=p1loc, spanwise=spanwise_mid - tri_side, vertical=bottom))\n\n lines.addByTwoPoints(p1, p2)\n lines.addByTwoPoints(p2, p3)\n lines.addByTwoPoints(p3, p1)\n\n # extrude the 2 triangular profiles just created\n assert sketch.profiles.count == 2, \"expected 2 triangle profiles in the sketch just created\"\n profile = sketch.profiles.item(0)\n extrudes = component.features.extrudeFeatures\n top_triangle_extrusion = extrudes.addSimple(profile, ValueInput.createByReal(rib_post_width),\n FeatureOperations.NewBodyFeatureOperation)\n top_triangle = top_triangle_extrusion.bodies.item(0)\n top_triangle.name = 'top_triangle'\n\n profile = sketch.profiles.item(1)\n extrudes = component.features.extrudeFeatures\n bottom_triangle_extrusion = extrudes.addSimple(profile, ValueInput.createByReal(rib_post_width),\n FeatureOperations.NewBodyFeatureOperation)\n bottom_triangle = bottom_triangle_extrusion.bodies.item(0)\n bottom_triangle.name = 'bottom_triangle'\n\n # now trim the triangles to the intersection with the wing body\n tool_bodies = ObjectCollection.create()\n tool_bodies.add(wing_body)\n combines = component.features.combineFeatures\n\n combine_input = combines.createInput(top_triangle, tool_bodies)\n combine_input.isKeepToolBodies = True\n combine_input.isNewComponent = False\n combine_input.operation = FeatureOperations.IntersectFeatureOperation\n combines.add(combine_input)\n\n combine_input = combines.createInput(bottom_triangle, tool_bodies)\n combine_input.isKeepToolBodies = True\n combine_input.isNewComponent = False\n combine_input.operation = FeatureOperations.IntersectFeatureOperation\n combines.add(combine_input)\n\n return post", "def postscript(self):\n self.g.postscript_output(fileName='tmp2.ps',decorations='no')", "def populate_voc(template, outdir, imgptf, bbox, label):\n\n # read the template\n bs_data = read_xml(template)\n\n # insert folder path\n fold = bs_data.folder\n fold.string = os.path.split(imgptf)[0]\n\n # insert filename\n fname = bs_data.filename\n fname.string = os.path.split(imgptf)[1]\n\n # get the whole dimensions\n wd, ht = get_dim(imgptf)\n width = bs_data.size.width\n width.string = str(wd)\n height = bs_data.size.height\n height.string = str(ht)\n\n # copy the object tag for however many bounding boxes are in the ROI\n flag = 1\n\n # check that there is more than one labeled region in image\n if len(bbox.shape) > 1:\n while flag < bbox.shape[0]:\n bs_data.annotation.append(copy.copy(bs_data.object))\n flag += 1\n\n # select all empty elements in the xml document\n nns = bs_data.select('name:empty') # list of empty name tags\n xmins = bs_data.select('xmin:empty')\n ymins = bs_data.select('ymin:empty')\n xmaxs = bs_data.select('xmax:empty')\n ymaxs = bs_data.select('ymax:empty')\n\n if len(nns) > 1:\n # bounding box info\n # for now just select the first tuple and hardcode other into [012221]\n for ii in range(bbox.shape[0]):\n # enter the label string\n name = nns[ii]\n name.string = label[ii]\n\n # enter the corresponding bbox location\n bb = bbox[ii, ::]\n xmin = xmins[ii]\n xmin.string = str(bb[1])\n ymin = ymins[ii]\n ymin.string = str(bb[0])\n xmax = xmaxs[ii]\n xmax.string = str(bb[3])\n ymax = ymaxs[ii]\n ymax.string = str(bb[2])\n else:\n ii = 0\n # enter the label string\n name = nns[ii]\n name.string = label[ii]\n\n # enter the corresponding bbox location\n xmin = xmins[ii]\n xmin.string = str(bbox[1])\n ymin = ymins[ii]\n ymin.string = str(bbox[0])\n xmax = xmaxs[ii]\n xmax.string = str(bbox[3])\n ymax = 
ymaxs[ii]\n ymax.string = str(bbox[2])\n\n outpath = os.path.join(outdir, os.path.splitext(os.path.basename(imgptf))[0]+'.xml')\n\n # save it\n with open(outpath, 'w') as ff:\n ff.write(str(bs_data))\n ff.close()", "def __init__(self, *args, **kwargs):\n _gdi_.PostScriptDC_swiginit(self,_gdi_.new_PostScriptDC(*args, **kwargs))", "def __init__(__self__, *,\n galleries: pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]],\n resource_group_name: pulumi.Input[str],\n template_data: Any,\n author: Optional[pulumi.Input[str]] = None,\n localized: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateLocalizedGalleryArgs']]]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"galleries\", galleries)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"template_data\", template_data)\n if author is not None:\n pulumi.set(__self__, \"author\", author)\n if localized is not None:\n pulumi.set(__self__, \"localized\", localized)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def prepare_for_blogger(args):\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n\n html = compose_blogger_html(args, title, posts, online_images, online_videos)\n\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)", "def prepare_bpe():\r\n bpe = spm.SentencePieceProcessor()\r\n bpe.load(os.path.join(Params.dataset_path, Params.bpe_model))\r\n return bpe", "def __init__(self, template=site_template):\n self.template = template\n self.template_file = None\n if not os.path.exists(self.template):\n raise OpagMissingPrecondition, \"%s does not exist\" % self.template", "def test_week_wrong(self):\n weeks = [0, 54]\n for week in weeks:\n with self.subTest(week=week):\n with self.assertRaisesRegex(ValueError, str(week)):\n star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=21,\n header_line=''\n )", "def test_getLocationFromPostcodeKML2(self):\n \n pstPrc=PostcodeProcessor()\n try:\n _coords=pstPrc.getLocationFromPostcodeKML(self.postcode2)\n self.assertTrue(0,'Bad coordinate should not return postcode.')\n except:\n self.assertRaises(KeyError)", "def bimport(filepath, resource_path=None, imgi_import=True, imge_import=True, seq_import=True, mov_import=True, txti_import=True, txte_import=True,\r\n script_import=True, img_embed=False, txt_embed=None, skip_sha1=False, img_merge=True):\r\n \r\n filepath = bpy.path.abspath(filepath) #Ensure path is absolute\r\n \r\n if resource_path is None or resource_path.strip() == \"\":\r\n resource_path = None\r\n else:\r\n resource_path = bpy.path.abspath(resource_path) #Ensure 
path is absolute\r\n \r\n if path.splitext(filepath)[1] == \".blib\":\r\n try:\r\n archive = zf.ZipFile(filepath, 'r')\r\n except zf.BadZipFile:\r\n raise InvalidBlibFile(\"File is not a valid Blender library\")\r\n \r\n blib = True\r\n try:\r\n file_checksum, blibtype, file_version, compatible, *rest = archive.comment.decode(\"utf-8\").split(\" \")\r\n except ValueError:\r\n raise InvalidBlibFile(\"File is broken, missing meta-data\")\r\n \r\n compatible = Version(compatible)\r\n \r\n if blibtype == \"cycles\":\r\n if compatible <= version:\r\n if archive.testzip() is not None:\r\n raise InvalidBlibFile(\"File is broken\")\r\n else:\r\n if not skip_sha1:\r\n checksum = archive_sha1(archive)\r\n \r\n if not file_checksum == checksum.hexdigest():\r\n raise InvalidBlibFile(\"Checksum does not match, file may be broken or have been altered\\n\"\r\n 'Run with \"skip_sha1\" to ignore checksum')\r\n else:\r\n raise BlibVersionError(\"File has incompatible version of blib\")\r\n else:\r\n raise BlibTypeError(\"File is not a valid Cycles material\")\r\n try:\r\n xml_file = archive.open(\"structure.xml\", 'r')\r\n except KeyError:\r\n raise InvalidBlibFile(\"File is broken, missing structure XML\")\r\n tree = ET.ElementTree(file=xml_file)\r\n xml_file.close()\r\n xroot = tree.getroot()\r\n \r\n elif path.splitext(filepath)[1] == \".xml\":\r\n tree = ET.ElementTree(file=filepath)\r\n xroot = tree.getroot()\r\n blib = False\r\n xversion = Version(xroot.attrib[\"compatible\"])\r\n if xversion > version:\r\n raise BlibVersionError(\"File has incompatible version of blib\")\r\n \r\n else:\r\n raise InvalidBlibFile(\"File is not a Blender library\")\r\n \r\n if xroot.tag != \"blib\":\r\n raise InvalidBlibFile(\"File is not a Blender library\")\r\n \r\n if xroot.attrib[\"type\"] != \"cycles\":\r\n raise BlibTypeError(\"File is not a valid Cycles material\")\r\n \r\n failed = {}\r\n imgs = {}\r\n txts = {}\r\n txt_paths = {}\r\n grps = {}\r\n scripts = {}\r\n resources = {\r\n \"images\": imgs,\r\n \"texts\": txts,\r\n \"text_paths\": txt_paths,\r\n \"groups\": grps,\r\n \"scripts\": scripts,\r\n }\r\n txt_dir = ResourceDir(\"texts\", resource_path)\r\n xres = xroot.find(\"resources\")\r\n \r\n #Import resources\r\n if xres is not None:\r\n ximgs = xres.find(\"images\")\r\n xtxts = xres.find(\"texts\")\r\n xgrps = xres.find(\"groups\")\r\n tmp_path = ResourceDir(\"tmp\", resource_path)\r\n path_dict = {}\r\n \r\n #Images\r\n if ximgs is not None and (imgi_import or imge_import or seq_import or mov_import) and blib:\r\n img_dir = ResourceDir(\"images\", resource_path)\r\n hash_dict = None\r\n sfv_update = False\r\n for ximg in ximgs:\r\n if ximg.attrib[\"source\"] in {'FILE', 'GENERATED'}:\r\n if ximg.attrib[\"origin\"] == \"internal\":\r\n if not imgi_import:\r\n pass\r\n else:\r\n if not imge_import:\r\n pass\r\n elif ximg.attrib[\"source\"] == 'SEQUENCE':\r\n if not seq_import:\r\n pass\r\n elif ximg.attrib[\"source\"] == 'MOVIE':\r\n if not mov_import:\r\n pass\r\n \r\n #Write image to temporary folder, and pack in Blender\r\n if ximg.attrib[\"source\"] in {'FILE', 'GENERATED'} and (img_embed or (img_embed is None and ximg.attrib[\"origin\"] == \"internal\")):\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(tmp_path), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n try:\r\n img = bpy.data.images.load(ipath)\r\n except:\r\n fail(failed, \"images\", \"import image '{}', unknown reason\".format(ximg.attrib[\"path\"]))\r\n else:\r\n img.source = ximg.attrib[\"source\"]\r\n try:\r\n 
img.pack()\r\n except:\r\n bpy.data.images.remove(img)\r\n fail(failed, \"images\", \"pack image '{}', unknown reason\".format(ximg.attrib[\"path\"]))\r\n else:\r\n img.filepath = \"\"\r\n imgs[ximg.attrib[\"name\"]] = img\r\n \r\n else: #Write image to resource folder, and load in Blender\r\n if img_merge and ximg.attrib[\"source\"] != 'SEQUENCE': #Use existing image in resources if available\r\n try:\r\n comment = archive.getinfo(ximg.attrib[\"path\"]).comment.decode(\"utf-8\")\r\n except KeyError:\r\n fail(failed, \"images\", \"import image '{}', file is missing\".format(ximg.attrib[\"path\"]))\r\n pass\r\n \r\n com_path = path_dict[comment] if comment != \"\" else \"\"\r\n com_name = path.basename(path.dirname(com_path))\r\n if comment != \"\" and com_name != \"tmp\":\r\n ipath = com_path\r\n path_dict[ximg.attrib[\"path\"]] = ipath\r\n else:\r\n #Create hash dictionary only in the first iteration\r\n if hash_dict is None:\r\n hash_path = path.join(img_dir.root, \"list.sfv\")\r\n hash_dict = {}\r\n if path.isfile(hash_path):\r\n sfv = re.compile(r\"(.*) (.*?)$\")\r\n hash_file = open(hash_path, 'r', encoding=\"utf-8\")\r\n for line in hash_file:\r\n key = sfv.sub(r\"\\2\", line).strip()\r\n val = sfv.sub(r\"\\1\", line).strip()\r\n if key in hash_dict and val in hash_dict[key]:\r\n sfv_update = True\r\n else:\r\n hash_dict.setdefault(key, []).append(val)\r\n hash_file.close()\r\n hash_bkp = hash_dict.copy()\r\n \r\n #Check if files match and set path to appropriate image\r\n img_path = ximg.attrib[\"path\"] if comment == \"\" else comment\r\n try:\r\n crc = format(archive.getinfo(img_path).CRC, 'x')\r\n except KeyError:\r\n fail(failed, \"images\", \"import image '{}', file is missing\".format(ximg.attrib[\"path\"]))\r\n pass\r\n \r\n if crc in hash_dict:\r\n i = 0\r\n while i < len(hash_dict[crc]):\r\n val = hash_dict[crc][i]\r\n fpath = path.join(img_dir.root, val)\r\n if path.isfile(fpath):\r\n fsize = path.getsize(fpath)\r\n zsize = archive.getinfo(img_path).file_size\r\n if fsize == zsize:\r\n ffile = open(fpath, 'rb')\r\n zfile = archive.open(img_path, 'r')\r\n if files_equal(ffile, zfile):\r\n ipath = fpath\r\n path_dict[ximg.attrib[\"path\"]] = ipath\r\n ffile.close()\r\n zfile.close()\r\n break\r\n ffile.close()\r\n zfile.close()\r\n else:\r\n hash_dict[crc].remove(val)\r\n i -= 1\r\n i += 1\r\n else:\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(img_dir), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n hash_dict[crc].append(path.relpath(ipath, img_dir.root))\r\n else:\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(img_dir), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n hash_dict[crc] = [path.relpath(ipath, img_dir.root)]\r\n else: #Use image in archive, even if duplicate\r\n if ximg.attrib[\"source\"] == 'SEQUENCE':\r\n seq_dir = path.dirname(ximg.attrib[\"path\"])\r\n dir_name = ximg.attrib[\"path\"].split(\"/\")[-2]\r\n seq_path = path.join(str(img_dir), dir_name)\r\n makedirs(seq_path)\r\n seq_imgs = [img for img in archive.namelist() if img.startswith(seq_dir)]\r\n for img in seq_imgs:\r\n i_tmp_path = extract_image(archive, img, seq_path, path_dict, failed)\r\n if img == ximg.attrib[\"path\"]:\r\n ipath = i_tmp_path\r\n if ipath is None:\r\n break\r\n if ipath is None:\r\n rmtree(seq_path)\r\n pass\r\n else:\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(img_dir), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n #load image to Blender\r\n try:\r\n img = bpy.data.images.load(ipath)\r\n 
except:\r\n fail(failed, \"images\", \"import image '{}', unknown reason\".format(ximg.attrib[\"path\"]))\r\n else:\r\n img.source = ximg.attrib[\"source\"]\r\n imgs[ximg.attrib[\"name\"]] = img\r\n \r\n if tmp_path:\r\n for item in listdir(str(tmp_path)):\r\n fpath = path.join(str(tmp_path), item)\r\n if path.isfile(fpath):\r\n remove(fpath)\r\n \r\n #Update hash file if list has changed\r\n if hash_dict is not None and (hash_dict != hash_bkp or sfv_update):\r\n hash_path = path.join(img_dir.root, \"list.sfv\")\r\n hash_file = open(hash_path, 'w', encoding=\"utf-8\")\r\n for key in hash_dict:\r\n for val in hash_dict[key]:\r\n hash_file.write(val + \" \" + key + \"\\n\")\r\n hash_file.close()\r\n \r\n #Texts\r\n if xtxts is not None and (txti_import or txte_import):\r\n for xtxt in xtxts:\r\n if xtxt.attrib[\"origin\"] == \"internal\":\r\n if txti_import:\r\n if \"path\" in xtxt.attrib:\r\n if blib:\r\n if txt_embed == False:\r\n import_texts(\"zip\", \"ext\", xtxt, txts, failed, archive, txt_dir)\r\n else:\r\n import_texts(\"zip\", \"int\", xtxt, txts, failed, archive, txt_dir)\r\n else:\r\n if txt_embed == False:\r\n import_texts(\"xml\", \"ext\", xtxt, txts, failed, None, txt_dir)\r\n else:\r\n import_texts(\"xml\", \"int\", xtxt, txts, failed, None, txt_dir)\r\n \r\n else:\r\n if txte_import:\r\n if \"path\" in xtxt.attrib:\r\n if blib:\r\n if txt_embed == True:\r\n import_texts(\"zip\", \"int\", xtxt, txts, failed, archive, txt_dir, txt_paths)\r\n else:\r\n import_texts(\"zip\", \"ext\", xtxt, txts, failed, archive, txt_dir, txt_paths)\r\n else:\r\n if txt_embed == True:\r\n import_texts(\"xml\", \"int\", xtxt, txts, failed, None, txt_dir, txt_paths)\r\n else:\r\n import_texts(\"xml\", \"ext\", xtxt, txts, failed, None, txt_dir, txt_paths)\r\n \r\n #Groups\r\n if xgrps is not None:\r\n for xgrp in xgrps:\r\n xnodes = xgrp.find(\"nodes\")\r\n xlinks = xgrp.find(\"links\")\r\n grp = bpy.data.node_groups.new(xgrp.attrib[\"name\"], xgrp.attrib[\"bl_idname\"])\r\n grps[xgrp.attrib[\"name\"]] = grp\r\n if xnodes is not None:\r\n build_tree(xnodes, xlinks, grp, resources, txt_embed, txt_dir, blib, script_import, archive, failed)\r\n \r\n #Import material\r\n xmat = xroot.find(\"main\")\r\n \r\n if xmat is not None:\r\n xcycles = xmat.find(\"cycles_settings\")\r\n xnodes = xmat.find(\"nodes\")\r\n xlinks = xmat.find(\"links\")\r\n \r\n mat = bpy.data.materials.new(xmat.attrib[\"name\"])\r\n set_attributes(mat, xmat, failed)\r\n set_attributes(mat.cycles, xcycles, failed)\r\n mat.use_nodes = True\r\n mat.node_tree.nodes.clear()\r\n build_tree(xnodes, xlinks, mat.node_tree, resources, txt_embed, txt_dir, blib, script_import, archive, failed)\r\n if blib:\r\n archive.close()\r\n for f in failed:\r\n print(\"{} {} failed to be imported/assigned.\".format(failed[f], f))\r\n return mat\r\n else:\r\n if blib:\r\n archive.close()\r\n for f in failed:\r\n print(\"{} {} failed to be imported/assigned.\".format(failed[f], f))\r\n return grp", "def test_sequence_outside_range(self):\n seqs = [-1, 100]\n for seq in seqs:\n with self.subTest(seq=seq):\n with self.assertRaisesRegex(ValueError, str(seq)):\n star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=46,\n header_line=''\n )", "def setup_b_instance(self,norm,add_ps_mask=True):\n inst_tag = self.tag + '_'+str(self.flux_array_ebin)\n b = bsm.bayesian_scan_NPTF(tag=inst_tag,nside=self.nside,work_dir='/tmp/'+self.tag+'/',psf_dir=psf_dir,nlive=700)\n # Input the data, using the external data if 
provided\n if self.use_external_data:\n b.load_external_data(self.f1.CTB_en_bins,[self.external_data[self.flux_array_ebin]],self.f1.CTB_exposure_maps)\n else:\n b.load_external_data(self.f1.CTB_en_bins,self.f1.CTB_count_maps,self.f1.CTB_exposure_maps)\n\n if add_ps_mask:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False,ps_mask_array = self.f1.ps_mask_array)\n else:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False)\n\n b.add_new_template(self.f1.template_dict)\n b.rebin_external_data(1)\n\n b.add_poiss_model('ps_model','$A_{ps}$',[0.0,3.0],False)\n b.add_poiss_model('p7','$A_{p7}$',[0.0,2.0],False)\n b.add_poiss_model('bubs','$A_{bubs}$',[0.0,2.0],False)\n b.add_poiss_model('iso','$A_{iso}$',[0.0,3.0],False)\n # Add in a fixed J_map template\n b.add_fixed_templates({'J_map':[norm*self.J_map_arr[self.flux_array_ebin]/np.mean(self.J_map_arr[self.flux_array_ebin])]})\n\n b.initiate_poissonian_edep()\n return b", "def _create_execute_psiblast(execute_command):\n\n def execute_psiblast(input_file: str, output_file: str, database: str):\n output_format = \"6 sallseqid qcovs pident\"\n cmd = \"{} < {} -db {} -outfmt '{}' -evalue 1e-5 > {}\".format(\n PSIBLAST_CMD, input_file, database, output_format, output_file)\n execute_command(cmd)\n\n return execute_psiblast", "def _make_psth(dataset, eval_mask, ignore_mask, make_params, cond_fields, kern_sd, pad='back', seg_len=None, skip_mask=None):\n # Reload dataset and smooth spikes\n bin_width = dataset.bin_width\n dataset = NWBDataset(dataset.fpath, dataset.prefix, skip_fields=['force', 'hand_pos', 'hand_vel', 'finger_pos', 'finger_vel', 'eye_pos', 'cursor_pos', 'muscle_len', 'muscle_vel', 'joint_ang', 'joint_vel'])\n if kern_sd > 50:\n dataset.smooth_spk(kern_sd, signal_type=['spikes', 'heldout_spikes'], overwrite=True, ignore_nans=True)\n else:\n dataset.smooth_spk(kern_sd, signal_type=['spikes', 'heldout_spikes'], overwrite=True)\n if bin_width != 1:\n dataset.resample(bin_width)\n\n # Make mask for valid trials to skip for PSTH calculation\n if skip_mask is not None:\n if callable(skip_mask):\n skip_mask = skip_mask(dataset.trial_info)\n else:\n skip_mask = np.full(len(dataset.trial_info), False)\n skip_ids = dataset.trial_info[skip_mask].trial_id.to_numpy()\n\n num_neur = len(dataset.data[['spikes', 'heldout_spikes']].columns)\n eval_trials = dataset.trial_info[eval_mask]\n psth_list = []\n for idx, row in eval_trials.iterrows():\n if pd.isna(row[make_params.get('align_field', 'start_time')]):\n continue\n if row.trial_id in skip_ids:\n psth_list.append(np.full((0, num_neur), np.nan))\n continue\n leaveout_mask = (dataset.trial_info.trial_id == row.trial_id)\n comb = row[cond_fields]\n mask = np.all(dataset.trial_info[cond_fields] == comb, axis=1)\n if not np.any(mask & (~ignore_mask) & (~skip_mask) & (~leaveout_mask)):\n logger.warning(f\"Not enough trials to compute PSTH for trial {row.trial_id}\")\n psth_list.append(np.full((0, num_neur), np.nan))\n continue\n trial_data = dataset.make_trial_data(ignored_trials=(~mask | ignore_mask | skip_mask | leaveout_mask), **make_params)\n min_len = np.min([trial.shape[0] for tid, trial in trial_data.groupby('trial_id')])\n psth = trial_data.groupby('align_time')[trial_data[['spikes', 'heldout_spikes']].columns].mean().to_numpy()\n psth = psth[:min_len] if pad == 'back' else psth[-min_len:]\n psth_list.append(psth)\n\n max_len = np.max([psth.shape[0] for psth in psth_list]) if seg_len is None else int(round(seg_len / 
dataset.bin_width))\n psth = np.vstack([psth if psth.shape[0] == max_len\n else psth[:max_len] if (psth.shape[0] > max_len and pad == 'back')\n else psth[max_len:] if (psth.shape[0] > max_len and pad == 'front')\n else np.concatenate([psth, np.full((max_len - psth.shape[0], psth.shape[1]), np.nan)], axis=0) if pad == 'back'\n else np.concatenate([np.full((max_len - psth.shape[0], psth.shape[1]), np.nan), psth], axis=0)\n for psth in psth_list])\n \n return psth", "def setupInputCopies(self,p,workinplace = False ):\n\n _img_root,_img_extn = fileutil.parseFilename(p['data'])\n\n if not workinplace:\n # Make copies of input images\n _copy = modifyRootname(_img_root)\n\n # Update parlist entries with pointers to new filename\n p['orig_filename'] = _copy\n else:\n p['orig_filename'] = _img_root", "def _validate_scripts(self):\n if \"scripts\" in self.params:\n self.params[\"scripts\"] = Path(self.workflow_path) / self.params[\"scripts\"]\n else:\n self.params[\"scripts\"] = self.workflow_path / \"scripts\"\n if not self.params[\"scripts\"].exists():\n raise Exception(f\"{self.params['scripts']} doesnt exist\")", "def _create_pbs_file(self, jobname, num_jobs, pf_pbs, pf_input_package_template, pf_output_package_template):\n\n # create unique compute directory\n pd_compute = None # run_shell_cmd(\"mktemp --tmpdir={}\".format(self._prl_options[\"pbs-pd-root-compute\"]))\n\n pbs_text = PBS._generate_pbs_header_array(num_jobs, jobname, self._prl_options, pd_compute=pd_compute)\n\n pbs_text += \"\\n{}\\n\".format(\n PBS._generate_call_command(self._env,\n pf_input_package_template,\n pf_output_package_template,\n self._prl_options,\n pd_compute=pd_compute\n )\n )\n\n # write to file\n write_to_file(pbs_text, pf_pbs)", "def init_boldpostprocess_wf(\n lower_bpf,\n upper_bpf,\n contigvol,\n bpf_order,\n motion_filter_order,\n motion_filter_type,\n band_stop_min,\n band_stop_max,\n smoothing,\n bold_file,\n head_radius,\n params,\n custom_conf,\n omp_nthreads,\n dummytime,\n output_dir,\n fd_thresh,\n num_bold,\n mni_to_t1w,\n despike,\n brain_template='MNI152NLin2009cAsym',\n layout=None,\n name='bold_postprocess_wf',\n ):\n\n\n TR = layout.get_tr(bold_file)\n file_base = os.path.basename(str(bold_file))\n workflow = Workflow(name=name)\n\n workflow.__desc__ = \"\"\"\nFor each of the {num_bold} BOLD series found per subject (across all\ntasks and sessions), the following post-processing was performed:\n\"\"\".format(num_bold=num2words(num_bold))\n\n if dummytime > 0:\n nvolx = str(np.floor(dummytime / TR))\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\nbefore nuisance regression and filtering of the data, the first {nvol} were discarded,\n.Furthermore,volumes with framewise-displacement greater than \n{fd_thresh} mm [@power_fd_dvars;@satterthwaite_2013] were flagged as outliers\n and excluded from nuisance regression.\n\"\"\".format(nvol=num2words(nvolx),fd_thresh=fd_thresh)\n\n else:\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\nbefore nuisance regression and filtering of the data, volumes with framewise-displacement greater than \n{fd_thresh} mm [@power_fd_dvars;@satterthwaite_2013] were flagged as outliers\n and excluded from nuisance regression.\n\"\"\".format(fd_thresh=fd_thresh)\n\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\n{regressors} [@benchmarkp;@satterthwaite_2013]. 
These nuisance regressors were \nregressed from the BOLD data using linear regression - as implemented in Scikit-Learn {sclver} [@scikit-learn].\nResidual timeseries from this regression were then band-pass filtered to retain signals within the {highpass}-{lowpass} Hz frequency band. \n \"\"\".format(regressors=stringforparams(params=params),sclver=sklearn.__version__,\n lowpass=upper_bpf,highpass=lower_bpf)\n\n\n # get reference and mask\n mask_file,ref_file = _get_ref_mask(fname=bold_file)\n\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['bold_file','ref_file','bold_mask','cutstom_conf','mni_to_t1w','t1w','t1seg']),\n name='inputnode')\n\n inputnode.inputs.bold_file = str(bold_file)\n inputnode.inputs.ref_file = str(ref_file)\n inputnode.inputs.bold_mask = str(mask_file)\n inputnode.inputs.custom_conf = str(custom_conf)\n\n\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['processed_bold', 'smoothed_bold','alff_out','smoothed_alff',\n 'reho_out','sc217_ts', 'sc217_fc','sc417_ts','sc417_fc','ts50_ts','ts50_fc',\n 'gs360_ts', 'gs360_fc','gd333_ts', 'gd333_fc','qc_file','fd']),\n name='outputnode')\n\n mem_gbx = _create_mem_gb(bold_file)\n\n\n fcon_ts_wf = init_fcon_ts_wf(mem_gb=mem_gbx['timeseries'],mni_to_t1w=mni_to_t1w,\n t1w_to_native=_t12native(bold_file),bold_file=bold_file,\n brain_template=brain_template,name=\"fcons_ts_wf\")\n\n alff_compute_wf = init_compute_alff_wf(mem_gb=mem_gbx['timeseries'], TR=TR,\n lowpass=upper_bpf,highpass=lower_bpf,smoothing=smoothing, cifti=False,\n name=\"compute_alff_wf\" )\n\n reho_compute_wf = init_3d_reho_wf(mem_gb=mem_gbx['timeseries'],smoothing=smoothing,\n name=\"afni_reho_wf\")\n\n write_derivative_wf = init_writederivatives_wf(smoothing=smoothing,bold_file=bold_file,\n params=params,cifti=None,output_dir=output_dir,dummytime=dummytime,\n lowpass=upper_bpf,highpass=lower_bpf,TR=TR,omp_nthreads=omp_nthreads,\n name=\"write_derivative_wf\")\n\n confoundmat_wf = pe.Node(ConfoundMatrix(head_radius=head_radius, params=params,\n filtertype=motion_filter_type,cutoff=band_stop_max,\n low_freq=band_stop_max,high_freq=band_stop_min,TR=TR,\n filterorder=motion_filter_order),\n name=\"ConfoundMatrix_wf\", mem_gb=mem_gbx['resampled'])\n\n censorscrub_wf = init_censoring_wf(mem_gb=mem_gbx['timeseries'],TR=TR,custom_conf=custom_conf,head_radius=head_radius,\n contigvol=contigvol,dummytime=dummytime,fd_thresh=fd_thresh,name='censoring')\n \n resdsmoothing_wf = init_resd_smoohthing(mem_gb=mem_gbx['timeseries'],smoothing=smoothing,cifti=False,\n name=\"resd_smoothing_wf\")\n \n filtering_wf = pe.Node(FilteringData(tr=TR,lowpass=upper_bpf,highpass=lower_bpf,\n filter_order=bpf_order),\n name=\"filtering_wf\", mem_gb=mem_gbx['timeseries'])\n\n regression_wf = pe.Node(regress(tr=TR),\n name=\"regression_wf\",mem_gb = mem_gbx['timeseries'])\n\n interpolate_wf = pe.Node(interpolate(TR=TR),\n name=\"interpolation_wf\",mem_gb = mem_gbx['timeseries'])\n\n \n executivesummary_wf =init_execsummary_wf(tr=TR,bold_file=bold_file,layout=layout,\n output_dir=output_dir,mni_to_t1w=mni_to_t1w,omp_nthreads=2)\n \n\n # get transform file for resampling and fcon\n \n \n \n transformfile = get_transformfile(bold_file=bold_file,\n mni_to_t1w=mni_to_t1w,t1w_to_native=_t12native(bold_file))\n t1w_mask = get_maskfiles(bold_file=bold_file,mni_to_t1w=mni_to_t1w)[1]\n\n bold2MNI_trans,bold2T1w_trans = get_transformfilex(bold_file=bold_file,\n mni_to_t1w=mni_to_t1w,t1w_to_native=_t12native(bold_file)) \n\n \n resample_parc = pe.Node(ApplyTransforms(\n dimension=3,\n 
input_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=1, desc='carpet',\n suffix='dseg', extension=['.nii', '.nii.gz'])),\n interpolation='MultiLabel',transforms=transformfile),\n name='resample_parc')\n \n resample_bold2T1w = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=mask_file,reference_image=t1w_mask,\n interpolation='NearestNeighbor',transforms=bold2T1w_trans),\n name='bold2t1_trans')\n \n resample_bold2MNI = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=mask_file,reference_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=2, desc='brain',\n suffix='mask', extension=['.nii', '.nii.gz'])),\n interpolation='NearestNeighbor',transforms=bold2MNI_trans),\n name='bold2mni_trans')\n\n qcreport = pe.Node(computeqcplot(TR=TR,bold_file=bold_file,dummytime=dummytime,t1w_mask=t1w_mask,\n template_mask = str(get_template('MNI152NLin2009cAsym', resolution=2, desc='brain',\n suffix='mask', extension=['.nii', '.nii.gz'])),\n head_radius=head_radius), name=\"qc_report\",mem_gb = mem_gbx['resampled'])\n \n\n workflow.connect([\n # connect bold confound matrix to extract confound matrix \n (inputnode, confoundmat_wf, [('bold_file', 'in_file'),]),\n ])\n \n # if there is despiking\n if despike:\n despike_wf = pe.Node(Despike(outputtype='NIFTI_GZ',args='-NEW'),name=\"despike_wf\",mem_gb=mem_gbx['resampled'])\n\n workflow.connect([\n (inputnode,despike_wf,[('bold_file','in_file')]),\n (despike_wf,censorscrub_wf,[('out_file','inputnode.bold')])\n ])\n else:\n workflow.connect([\n (inputnode,censorscrub_wf,[('bold_file','inputnode.bold')]),\n ])\n \n # add neccessary input for censoring if there is one\n workflow.connect([\n\t (inputnode,censorscrub_wf,[('bold_file','inputnode.bold_file'),\n\t ('bold_mask','inputnode.bold_mask')]),\n\t (confoundmat_wf,censorscrub_wf,[('confound_file','inputnode.confound_file')])\n ])\n\n # regression workflow \n workflow.connect([\n\t (inputnode,regression_wf,[('bold_mask','mask')]),\n\t (censorscrub_wf,regression_wf,[('outputnode.bold_censored','in_file'),\n\t ('outputnode.fmriprepconf_censored','confounds'), \n\t\t ('outputnode.customconf_censored','custom_conf')])\n ])\n # interpolation workflow\n workflow.connect([\n\t (inputnode,interpolate_wf,[('bold_file','bold_file'),('bold_mask','mask_file')]),\n\t (censorscrub_wf,interpolate_wf,[('outputnode.tmask','tmask')]),\n\t (regression_wf,interpolate_wf,[('res_file','in_file')]), \n\t])\n # add filtering workflow \n workflow.connect([\n (inputnode,filtering_wf,[('bold_mask','mask')]),\n\t (interpolate_wf,filtering_wf,[('bold_interpolated','in_file')]),\n\n ])\n \n # residual smoothing \n workflow.connect([\n\t (filtering_wf,resdsmoothing_wf,[('filt_file','inputnode.bold_file')]) \n ])\n\n #functional connect workflow\n workflow.connect([\n (inputnode,fcon_ts_wf,[('ref_file','inputnode.ref_file'),]),\n (filtering_wf,fcon_ts_wf,[('filt_file','inputnode.clean_bold'),]),\n ])\n # reho and alff\n workflow.connect([ \n\t (inputnode,alff_compute_wf,[('bold_mask','inputnode.bold_mask')]),\n\t (inputnode,reho_compute_wf,[('bold_mask','inputnode.bold_mask')]),\n\t (filtering_wf, alff_compute_wf,[('filt_file','inputnode.clean_bold')]),\n\t (filtering_wf, reho_compute_wf,[('filt_file','inputnode.clean_bold')]),\n ])\n\n # qc report\n workflow.connect([\n (inputnode,qcreport,[('bold_mask','mask_file')]),\n (filtering_wf,qcreport,[('filt_file','cleaned_file')]),\n (censorscrub_wf,qcreport,[('outputnode.tmask','tmask')]),\n (inputnode,resample_parc,[('ref_file','reference_image')]),\n 
(resample_parc,qcreport,[('output_image','seg_file')]),\n (resample_bold2T1w,qcreport,[('output_image','bold2T1w_mask')]),\n (resample_bold2MNI,qcreport,[('output_image','bold2temp_mask')]),\n (qcreport,outputnode,[('qc_file','qc_file')]),\n ])\n\n \n\n # write to the outputnode, may be use in future\n workflow.connect([\n\t(filtering_wf,outputnode,[('filt_file','processed_bold')]),\n\t(censorscrub_wf,outputnode,[('outputnode.fd','fd')]),\n\t(resdsmoothing_wf,outputnode,[('outputnode.smoothed_bold','smoothed_bold')]),\n\t(alff_compute_wf,outputnode,[('outputnode.alff_out','alff_out'),\n ('outputnode.smoothed_alff','smoothed_alff')]),\n (reho_compute_wf,outputnode,[('outputnode.reho_out','reho_out')]),\n\t (fcon_ts_wf,outputnode,[('outputnode.sc217_ts','sc217_ts' ),('outputnode.sc217_fc','sc217_fc'),\n ('outputnode.sc417_ts','sc417_ts'),('outputnode.sc417_fc','sc417_fc'),\n ('outputnode.gs360_ts','gs360_ts'),('outputnode.gs360_fc','gs360_fc'),\n ('outputnode.gd333_ts','gd333_ts'),('outputnode.gd333_fc','gd333_fc'),\n ('outputnode.ts50_ts','ts50_ts'),('outputnode.ts50_fc','ts50_fc')]),\n\n ])\n \n # write derivatives \n workflow.connect([\n (filtering_wf,write_derivative_wf,[('filt_file','inputnode.processed_bold')]),\n\t (resdsmoothing_wf,write_derivative_wf,[('outputnode.smoothed_bold','inputnode.smoothed_bold')]),\n (censorscrub_wf,write_derivative_wf,[('outputnode.fd','inputnode.fd')]),\n (alff_compute_wf,write_derivative_wf,[('outputnode.alff_out','inputnode.alff_out'),\n ('outputnode.smoothed_alff','inputnode.smoothed_alff')]),\n (reho_compute_wf,write_derivative_wf,[('outputnode.reho_out','inputnode.reho_out')]),\n (fcon_ts_wf,write_derivative_wf,[('outputnode.sc217_ts','inputnode.sc217_ts' ),\n ('outputnode.sc217_fc','inputnode.sc217_fc'),\n ('outputnode.sc417_ts','inputnode.sc417_ts'),\n ('outputnode.sc417_fc','inputnode.sc417_fc'),\n ('outputnode.gs360_ts','inputnode.gs360_ts'),\n ('outputnode.gs360_fc','inputnode.gs360_fc'),\n ('outputnode.gd333_ts','inputnode.gd333_ts'),\n ('outputnode.gd333_fc','inputnode.gd333_fc'),\n ('outputnode.ts50_ts','inputnode.ts50_ts'),\n ('outputnode.ts50_fc','inputnode.ts50_fc')]),\n (qcreport,write_derivative_wf,[('qc_file','inputnode.qc_file')]),\n\n\n\n ])\n functional_qc = pe.Node(FunctionalSummary(bold_file=bold_file,tr=TR),\n name='qcsummary', run_without_submitting=True)\n\n ds_report_qualitycontrol = pe.Node(\n DerivativesDataSink(base_directory=output_dir, desc='qualitycontrol',source_file=bold_file, datatype=\"figures\"),\n name='ds_report_qualitycontrol', run_without_submitting=True)\n\n ds_report_preprocessing = pe.Node(\n DerivativesDataSink(base_directory=output_dir, desc='preprocessing',source_file=bold_file, datatype=\"figures\"),\n name='ds_report_preprocessing', run_without_submitting=True)\n ds_report_postprocessing = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='postprocessing', datatype=\"figures\"),\n name='ds_report_postprocessing', run_without_submitting=True)\n\n ds_report_connectivity = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='connectvityplot', datatype=\"figures\"),\n name='ds_report_connectivity', run_without_submitting=True)\n\n ds_report_rehoplot = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='rehoplot', datatype=\"figures\"),\n name='ds_report_rehoplot', run_without_submitting=True)\n\n ds_report_afniplot = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, 
desc='afniplot', datatype=\"figures\"),\n name='ds_report_afniplot', run_without_submitting=True)\n\n workflow.connect([\n (qcreport,ds_report_preprocessing,[('raw_qcplot','in_file')]),\n (qcreport,ds_report_postprocessing ,[('clean_qcplot','in_file')]),\n (qcreport,functional_qc,[('qc_file','qc_file')]),\n (functional_qc,ds_report_qualitycontrol,[('out_report','in_file')]),\n (fcon_ts_wf,ds_report_connectivity,[('outputnode.connectplot','in_file')]),\n (reho_compute_wf,ds_report_rehoplot,[('outputnode.rehohtml','in_file')]),\n (alff_compute_wf,ds_report_afniplot ,[('outputnode.alffhtml','in_file')]),\n ])\n\n\n ## exexetive summary workflow\n workflow.connect([\n (inputnode,executivesummary_wf,[('t1w','inputnode.t1w'),('t1seg','inputnode.t1seg'),\n ('bold_file','inputnode.bold_file'),('bold_mask','inputnode.mask')]),\n\n (regression_wf,executivesummary_wf,[('res_file','inputnode.regdata'),]),\n (filtering_wf,executivesummary_wf,[('filt_file','inputnode.resddata')]),\n (censorscrub_wf,executivesummary_wf,[('outputnode.fd','inputnode.fd')]),\n ]),\n\n return workflow", "def prepare_pr_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n author: Optional[pulumi.Input[str]] = None,\n galleries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkbookTemplateGalleryArgs']]]]] = None,\n localized: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkbookTemplateLocalizedGalleryArgs']]]]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n resource_name_: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n template_data: Optional[Any] = None,\n __props__=None):\n ...", "def test_hooks_missing_script(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n \"\"\"\n image: na\n hooks:\n user:\n not_script: missing \"script\" key\n \"\"\"\n )\n\n self._invalid_config()", "def test_init_Error_b_wrong_shape(self):\n with self.assertRaises(ValueError):\n nbh.Polytope(b=[1, 2, 3])", "def __init__(self, crop_no, sim_length, book, soil):\r\n sheet_inputs = book.sheet_by_name('inputs')\r\n # 
Campbell max canopy transpiration, mm/d:\r\n self.campbell_max_daily_transp = sheet_inputs.cell(6, crop_no).value\r\n # DSSAT max water uptake, cm3water/cm3root:\r\n self.dssat_max_water_uptake = sheet_inputs.cell(7, crop_no).value\r\n # Feddes stress threshold water potential for low T demand, J/kg\r\n self.P2L = sheet_inputs.cell(8, crop_no).value\r\n # Feddes stress threshold water potential for high T demand, J/kg\r\n self.P2H = sheet_inputs.cell(9, crop_no).value\r\n # Feddes high transpiration demand, mm/day\r\n self.R2H = sheet_inputs.cell(10, crop_no).value\r\n # Feddes low transpiration demand, mm/day\r\n self.R2L = sheet_inputs.cell(11, crop_no).value\r\n # Feddes alue of the pressure head, below which roots start to extract\r\n # water from the soil\r\n self.P0 = sheet_inputs.cell(12, crop_no).value\r\n #Campbell leaf water potential at onset of stomatal closure [J/kg]:\r\n self.leaf_water_pot_stress_onset = sheet_inputs.cell(13, crop_no).value\r\n #Campbell leaf water potential at wilting point [J/kg]:\r\n self.leaf_water_pot_wilt_point = sheet_inputs.cell(14, crop_no).value\r\n # EPIC water extraction distribution\r\n self.water_extraction_dist = sheet_inputs.cell(15, crop_no).value\r\n\r\n self.leaf_water_pot = 0 #J/kg\r\n self.sim_length = sim_length #d\r\n self.conductance = np.ones(soil.total_layers)\r\n self.water_uptake = np.zeros(soil.total_layers)\r\n self.leaf_water_potential = np.zeros(soil.total_layers)\r\n self.soil_water_pot_avg = 0\r\n self.transp_ratio = 0 # to quantify crop water stress\r\n self.crop_transp = 0\r\n self.pot_transp = 0\r\n self.att_transp = 0\r\n self.expect_transp = 0\r\n self.cum_transp = 0\r\n self.cum_pot_transp = 0\r\n self.root_dens = np.zeros(soil.total_layers) # m root / m3 soil\r\n self.root_fraction = np.zeros(soil.total_layers) #m root / m soil\r\n sheet_soil = book.sheet_by_name('soil')\r\n for lyr in soil.layers:\r\n self.root_dens[lyr] = sheet_soil.cell(9+lyr, 9).value\r\n self.root_fraction[lyr] = sheet_soil.cell(9+lyr, 10).value\r\n self.root_depth = sheet_soil.cell(3, 4).value", "def __call__(self, basepath: str, scriptpath: str) -> Process:\n ..." ]
[ "0.5087618", "0.43715426", "0.43431723", "0.41926655", "0.4133377", "0.4116569", "0.41082445", "0.4093432", "0.40924096", "0.40412655", "0.40265974", "0.4017204", "0.401558", "0.4012506", "0.39556316", "0.39462158", "0.39448643", "0.3938337", "0.3933324", "0.39185902", "0.3912965", "0.39013705", "0.38820532", "0.3855901", "0.38475254", "0.38355836", "0.38344893", "0.383062", "0.38276494", "0.3818761" ]
0.654025
0
construct_postscript raises ValueError for ISSN of incorrect length. ISSNs are either 7 or 8 digits long (8 being the optional check digit), with a mandatory (for BWIPP) hyphen in the fifth place. construct_postscript should raise a ValueError if the ISSN is not of the form \d{4}-\d{3,4}.
def test_issn_incorrect_length(self): issns = ['0307-15', '0307-15789', '03071758', '0307175'] for num in issns: with self.subTest(num=num): with self.assertRaisesRegex(ValueError, num): star_barcode.construct_postscript( issn=num, bwipp_location=self.bwipp, sequence=21, week=46, header_line='' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_typical(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n issn_args = ' '.join([self.issn, str(seq), str(week)])\n result = star_barcode.construct_postscript(\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )\n self.assertGreater(result.find(str(self.bwipp)), -1)\n self.assertGreater(result.find(issn_args), -1)\n self.assertGreater(result.find(header), -1)", "def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )", "def test_sequence_outside_range(self):\n seqs = [-1, 100]\n for seq in seqs:\n with self.subTest(seq=seq):\n with self.assertRaisesRegex(ValueError, str(seq)):\n star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=46,\n header_line=''\n )", "def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response", "def test_week_wrong(self):\n weeks = [0, 54]\n for week in weeks:\n with self.subTest(week=week):\n with self.assertRaisesRegex(ValueError, str(week)):\n star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=21,\n header_line=''\n )", "def test_sequence_0_to_9(self):\n seqs = list(range(10))\n for seq in seqs:\n with self.subTest(seq=seq):\n result = star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=20,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02}'),\n -1\n )", "def validate_SSN(SSN_test):\n\n is_valid_SSN = False\n\n # if user breaks format but enters 9 digits, SSN is counted as valid\n if len(SSN_test) == 9 and SSN_test.isdigit():\n is_valid_SSN = True\n\n\n # otherwise, if the length is not 11 characters, and there aren't at least 2 dashes, entry immediately fails\n elif len(SSN_test) != 11 or (SSN_test.count(\"-\") != 2):\n pass\n\n # if the dashes are in the wrong place, entry fails\n elif (SSN_test[3] != \"-\") and (SSN_test[6] != \"-\"):\n pass\n\n # dashes are correct, but all other characters must be numbers\n else:\n valid_SSN1 = (SSN_test[0 : 3]).isdigit()\n valid_SSN2 = (SSN_test[4 : 6]).isdigit()\n valid_SSN3 = (SSN_test[7 : ]).isdigit()\n if (valid_SSN1 and valid_SSN2 and valid_SSN3):\n is_valid_SSN = True\n else:\n is_valid_SSN = False\n\n return is_valid_SSN", "def ssn(\n self,\n min_age: int = 18,\n max_age: int = 90,\n long: bool = False,\n dash: bool = True,\n ) -> str:\n\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = 
datetime.datetime.now() - age\n yr_fmt = \"%Y\" if long else \"%y\"\n pnr_date = f\"{birthday:{yr_fmt}%m%d}\"\n chk_date = pnr_date[2:] if long else pnr_date\n suffix = f\"{self.generator.random.randrange(0, 999):03}\"\n luhn_checksum = str(calculate_luhn(int(chk_date + suffix)))\n hyphen = \"-\" if dash else \"\"\n pnr = f\"{pnr_date}{hyphen}{suffix}{luhn_checksum}\"\n\n return pnr", "def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj", "def insert_repnr_fielddesc(inst):\n\telname = \"rn\"\n\telmarccode = \"037__a\"\n\teltype = \"I\"\n\telsize = \"30\"\n\telrows = \"\"\n\telcols = \"\"\n\telmaxlength = \"\"\n\telval = \"%s-<YYYY>-?????\" %(inst.upper())\n\telfidesc = \"\" \n\telmodifytext = \"\"\n\tinsert_element_details(elname, elmarccode, eltype, elsize, elrows, elcols, elmaxlength, elval, elfidesc, elmodifytext) # insert into sbmFIELDDESCR\t", "def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p", "def validate_NRIC(nric):\n\tif len(nric) != 9: # invalid length\n\t\treturn \"Invalid length (must be exactly 9 characters, was given %d.)\" % len(\n\t\t nric)\n\n\t# Constants\n\tNRIC_ID = nric[0]\n\tLAST_LETTER = nric[-1]\n\tNUMBERS = nric[1:-1]\n\n\tif not match(r'[STFG]', nric):\n\t\t# First letter is not S, T, F or G\n\t\treturn \"Invalid NRIC ID: %s\" % NRIC_ID\n\n\t# The NRIC first and last letters should be a letter, the middle should\n\t# be all numbers (7 numbers exactly)\n\tif match(r'[STFG][0-9]+[A-Z]', nric) is None:\n\t\treturn \"Invalid format: %s\" % nric\n\n\tchecksum = calculate_checksum(NRIC_ID, NUMBERS)\n\tlast_letter_value = checksum % 11\n\tif last_letter_value == get_value(LAST_LETTER, NRIC_ID):\n\t\treturn \"Okay.\"\n\telse:\n\t\treturn \"Invalid NRIC, last letter must be %s.\" % get_letter(\n\t\t last_letter_value, NRIC_ID)", "def invalid_ssn(self) -> str:\n itin_group_numbers = [\n 70,\n 71,\n 72,\n 73,\n 74,\n 75,\n 76,\n 77,\n 78,\n 79,\n 80,\n 81,\n 82,\n 83,\n 84,\n 85,\n 86,\n 87,\n 88,\n 90,\n 91,\n 92,\n 94,\n 95,\n 96,\n 97,\n 98,\n 99,\n ]\n area = self.random_int(min=0, max=999)\n if area < 900 and area not in {666, 0}:\n random_group_or_serial = self.random_int(min=1, max=1000)\n if 
random_group_or_serial <= 500:\n group = 0\n serial = self.random_int(0, 9999)\n else:\n group = self.random_int(0, 99)\n serial = 0\n elif area in {666, 0}:\n group = self.random_int(0, 99)\n serial = self.random_int(0, 9999)\n else:\n group = self.random_element([x for x in range(0, 100) if x not in itin_group_numbers])\n serial = self.random_int(0, 9999)\n\n invalid_ssn = f\"{area:03d}-{group:02d}-{serial:04d}\"\n return invalid_ssn", "def _formatMatriculaValid(np):\n return len(np)==7 and np[:4].isdigit() and np[4:].isalpha()", "def formata_cpf_cnpj(cnpj_cpf):\n\n if cnpj_cpf:\n val = re.sub('[^0-9]', '', cnpj_cpf)\n val = val.lstrip(\"0\")\n if len(val) <= 11:\n val = val.zfill(11)\n return \"%s.%s.%s-%s\" % (\n val[0:3], val[3:6], val[6:9], val[9:11])\n elif 11 < len(val) <= 14:\n val = val.zfill(14)\n return \"%s.%s.%s/%s-%s\" % (\n val[0:2], val[2:5], val[5:8], val[8:12], val[12:14])", "def create_citation(cit):\n if cit is not None:\n if cit['citation-type'] == \"BIBTEX\":\n return pybtex.database.parse_string(cit['citation-value'], \"bibtex\")\n return None", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number", "def string_to_partialdate(value):\n if isinstance(value, PartialDate):\n return value\n elif isinstance(value, datetime.date):\n return PartialDate.fromdate(value)\n elif not value:\n return None\n\n # It's a string.\n try:\n args = [int(x) for x in value.split('-')]\n except:\n raise ValidationError(\n \"Datum bitte im Format JJJJ-MM-TT angeben, \" +\n \"Monat und/oder Tag können ausgelassen werden.\")\n if len(args) < 1 or len(args) > 3:\n raise ValidationError(\n \"Datum bitte im Format JJJJ-MM-TT angeben, \" +\n \"Monat und/oder Tag können ausgelassen werden.\")\n try:\n return PartialDate(*args)\n except ValueError:\n raise ValidationError(\n \"Datum bitte im Format JJJJ-MM-TT angeben, \" +\n \"Monat und/oder Tag können ausgelassen werden.\")", "def test_out_of_range(self):\n term, rmd = util.parse_date(\"0699\")\n self.assertIsNone(util.parse_date_partial(term))", "def _validate(self, s: str):\n if not re.match(r'[a-z][a-z0-9\\-]{5,29}', s):\n raise ValueError(('Invalid Google Cloud Platform Project ID \"{}\": '\n 'must be between 6 and 30 characters and contain '\n 'lowercase letters, digits or hyphens').format(s))", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. 
\n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def calc_check_digit_issn(issn):\n\n total = 0\n lissn = list(issn.replace('-', ''))\n\n for i, v in enumerate(lissn[:-1]):\n total = total + ((8-i) * int(v))\n\n remainder = total % 11\n\n if not remainder:\n check_digit = 0\n else:\n check_digit = 11 - remainder\n\n return 'X' if check_digit == 10 else str(check_digit)", "def _format_intermediary_institution_56D(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val", "def _validate_intermediary_institution_56D(self, val):\n return val", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def process_pub_date(pmid, year, mon, day, medline_date):\n\n if not year:\n year = 1900\n\n if medline_date:\n\n match = re.search(r\"\\d{4,4}\", medline_date)\n if match:\n year = match.group(0)\n\n if int(year) < 1900:\n year = 1900\n\n if year and re.match(\"[a-zA-Z]+\", mon):\n try:\n pub_date = datetime.datetime.strptime(f\"{year}-{mon}-{day}\", \"%Y-%b-%d\").strftime(\n \"%Y-%m-%d\"\n )\n except Exception as e:\n pub_date = \"1900-01-01\"\n log.error(f\"Problem converting {year} {mon} {day} to pubdate for PMID:{pmid}\")\n\n elif year:\n pub_date = f\"{year}-{mon}-{day}\"\n\n else:\n pub_date = None\n if year and re.match(\"[a-zA-Z]+\", mon):\n try:\n pub_date = datetime.datetime.strptime(f\"{year}-{mon}-{day}\", \"%Y-%b-%d\").strftime(\n \"%Y-%m-%d\"\n )\n except Exception as e:\n pub_date = \"1900-01-01\"\n log.error(f\"Problem converting {year} {mon} {day} 
to pubdate for PMID:{pmid}\")\n\n elif year:\n pub_date = f\"{year}-{mon}-{day}\"\n\n return pub_date", "def fix_errors_in_citation(citation):\n result = regex.sub(r\"\\s+\", \" \", citation)\n result = regex.sub(r\"§(?=\\d)\", \"§ \", result)\n result = regex.sub(r\",\\sbis\\s\", \" bis \", result)\n return result", "def validate_cid_regex(cid: str) -> None:\n if not re.match(fr\"^{compound_settings.PREFIX}CID\\d0\\d+$\", cid):\n raise ValidationError(\n f\"Invalid format. Expected {compound_settings.PREFIX}CID$0######.\"\n )" ]
[ "0.5829142", "0.56344336", "0.55918896", "0.5059645", "0.5048119", "0.5004267", "0.49888775", "0.47327802", "0.4711844", "0.46778655", "0.46777925", "0.45442563", "0.45348784", "0.45309243", "0.45161062", "0.45033395", "0.45016456", "0.44776788", "0.44747177", "0.44611934", "0.4439748", "0.44302863", "0.44151488", "0.44048056", "0.4388853", "0.43625435", "0.43558568", "0.43390805", "0.43379837", "0.4320928" ]
0.7332778
0
construct_postscript raises ValueError if sequence outside range. Sequence can be between 00 and 99. Although in our usage the second digit is the ISO weekday, so in practice limited to 01-07 and 91-97, special sequences may require the extra numbers. Must not raise for sequences 00-09, represented by integers 0-9, as these are valid sequences (and should be padded as such), tested separately.
def test_sequence_outside_range(self): seqs = [-1, 100] for seq in seqs: with self.subTest(seq=seq): with self.assertRaisesRegex(ValueError, str(seq)): star_barcode.construct_postscript( sequence=seq, bwipp_location=self.bwipp, issn=self.issn, week=46, header_line='' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sequence_0_to_9(self):\n seqs = list(range(10))\n for seq in seqs:\n with self.subTest(seq=seq):\n result = star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=20,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02}'),\n -1\n )", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def test_typical(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n issn_args = ' '.join([self.issn, str(seq), str(week)])\n result = star_barcode.construct_postscript(\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )\n self.assertGreater(result.find(str(self.bwipp)), -1)\n self.assertGreater(result.find(issn_args), -1)\n self.assertGreater(result.find(header), -1)", "def create_seqeunce_helper(i, text, dsl, char_count, char_to_n, extra, length = seq_length):\n\n seq_int = [] # Sequence mapped to integers\n output_seq = np.zeros((length, char_count)) # Output sequence which will become one item in input array \n\n # Get the next sequence and map its characters to integers\n for v in text[i * length + extra : (i + 1) * length + extra]:\n # If the seed_text is missing a character we append 0\n if v in char_to_n:\n seq_int.append(char_to_n[v])\n else:\n seq_int.append(0)\n\n # For character in sequence\n for j in range(length):\n # Set column corrpsonding to that character to 1\n output_seq[j][seq_int[j]] = 1.0 \n\n return output_seq", "def test_week_wrong(self):\n weeks = [0, 54]\n for week in weeks:\n with self.subTest(week=week):\n with self.assertRaisesRegex(ValueError, str(week)):\n star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=21,\n header_line=''\n )", "def test_week_in_range(self):\n weeks = list(range(1, 54))\n seq = 21\n for week in weeks:\n with self.subTest(week=week):\n result = star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02} {week:02}'),\n -1\n )", "def parse_sequence_key(self,\r\n seq_value,\r\n seq_value2=None):\r\n\r\n\r\n\r\n seq_type = str\r\n seq_mark = EMPTYCHAR\r\n\r\n if seq_value and seq_value in [DOLLAR,PLUS,POUND,UNDERLINE,CARET]:\r\n seq_type,seq_mark,seq_value = {DOLLAR:(str,EMPTYCHAR,EMPTYCHAR),\r\n PLUS:(int,EMPTYCHAR,EMPTYCHAR),\r\n POUND:(type(datetime.date(1972,3,13)),POUND,EMPTYCHAR),\r\n UNDERLINE:(type(Index(0)),UNDERLINE,EMPTYCHAR),\r\n CARET:(float,EMPTYCHAR,EMPTYCHAR)}[seq_value]\r\n return seq_mark,seq_value,seq_type, seq_value2\r\n\r\n\r\n\r\n\r\n if seq_value and seq_value[0] in [POUND,UNDERLINE]:\r\n seq_mark = seq_value[0]\r\n seq_value = seq_value[1:]\r\n\r\n if seq_mark == POUND:\r\n seq_value += '-01-01'\r\n seq_value = DASH.join(seq_value.split(DASH)[0:3])\r\n\r\n\r\n if is_date(seq_value):\r\n seq_value = is_date(seq_value,returndate=True)\r\n if seq_value2:\r\n seq_value2 = is_date(seq_value2,returndate=True)\r\n\r\n seq_type = type(datetime.date(1972,3,13))\r\n\r\n\r\n elif seq_mark == UNDERLINE:\r\n seq_value = Index(seq_value)\r\n if seq_value2:\r\n seq_value2 = Index(seq_value2)\r\n seq_type = type(Index(0))\r\n\r\n\r\n elif (((DASH in 
seq_value\r\n and len(seq_value) > 1\r\n and seq_value[0] == DASH\r\n and DASH not in seq_value[1:])\r\n or DASH not in seq_value)\r\n and ((PERIOD in seq_value\r\n and seq_value.count(PERIOD) == 1\r\n and PERIOD not in seq_value[0] and PERIOD not in seq_value[-1])\r\n or PERIOD not in seq_value) and\r\n seq_value.replace(PERIOD,\r\n EMPTYCHAR).replace(DASH,\r\n EMPTYCHAR).isnumeric()):\r\n\r\n seq_type = float\r\n if seq_type == float:\r\n seq_value = float(seq_value)\r\n if seq_value2:\r\n seq_value2 = float(seq_value2)\r\n\r\n return seq_mark, seq_value, seq_type, seq_value2", "def __manual_period(seqlen, first_base_pos, period, peroid_color='#ff5722'):\n cmd = \"-periodNum 0 \"\n j = 0\n annotation_cmd = \"\"\n for i in range(first_base_pos, first_base_pos+seqlen):\n j += 1\n if i%period == 0:\n annotation_cmd += f\"{i}:type=B,anchor={j},size=8,color={peroid_color};\"\n if annotation_cmd:\n cmd += f\"-annotations \\\"{annotation_cmd}\\\"\"\n return cmd", "def backtranslate(p_seq, n_seq):\r\n # Keep track of the new sequence. Also keep track of which codon we are\r\n # actually processing (gaps don't count)\r\n newseq = ''\r\n codon = 0\r\n for aa in p_seq:\r\n if aa == '-':\r\n newseq += '---'\r\n else:\r\n newseq += n_seq[codon*3:(codon*3) + 3]\r\n codon += 1\r\n return newseq", "def test_parse_fasta_title_09():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title 2 after', 'mixed_ends', '')\n assert seq_name == 'title'\n assert seq_end == '2'", "def make_dt_ti_ptn(text):\n ptn = re.sub(r'\\s+', '', text) # remove spaces\n return re.sub(r'\\d', '0', ptn) # replace all numbers with zero", "def dna_number(bp_seq):\r\n # Hint: use dna_digit\r\n\r\n # YOUR CODE HERE\r", "def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )", "def patten2number(sequence):\n try:\n if len(sequence) == 0:\n return 0\n last_base = sequence[-1]\n prefix = sequence[:-1]\n return 4 * patten2number(prefix) + BASE_TO_NUMBER[last_base]\n except KeyError:\n raise ValueError('Not able to convert nucleotide: %s' % last_base)", "def _process_prosody(sonority):\n assert 9 not in sonority[1:-1]\n assert sonority[0] == sonority[-1] == 9\n\n # create the output values\n psequence = []\n first = True # stores whether first syllable is currently being processed\n\n for i in range(1, len(sonority) - 1):\n # get a segment with context\n a, b, c = sonority[i - 1], sonority[i], sonority[i + 1]\n\n if b == 7: # a vowel\n if first:\n psequence.append('X')\n first = False\n elif c == 9: # last\n psequence.append('Z')\n else:\n psequence.append('Y')\n elif b == 8: # a tone\n psequence.append('T')\n elif a >= b >= c or c == 8: # descending\n if c == 9: # word final position\n psequence.append('Z' if b == 7 else 'N') # vowel or consonant\n else:\n if first:\n first = False\n psequence.append('A')\n else:\n psequence.append('L')\n elif b < c or a > b <= c or a < b <= c: # ascending\n # check for syllable first\n if a == 9:\n psequence.append('A')\n elif a >= b:\n if c == 9:\n psequence.append('N')\n else:\n if psequence[-1] != 'A':\n psequence = psequence[:-1] + [psequence[-1].replace('L', 'M')] + ['B']\n else:\n psequence.append('C')\n else:\n psequence.append('C')\n elif a < b > c: # consonant peak\n if first:\n psequence.append('X')\n first = False\n else:\n psequence.append('Y')\n 
else:\n raise ValueError(\n \"Conversion to prosodic string failed due to a condition which was not \"\n \"defined in the convertion, for details compare the numerical string \"\n \"{0} with the profile string {1}\".format(sonority, psequence))\n return psequence", "def test_parse_fasta_title_08():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title_1', 'mixed_ends', '')\n assert seq_name == 'title'\n assert seq_end == '1'", "def initiate_seq(end, nice_scaff):\n end_name = end.split(\"prime_\")\n \n contig_number = end.split(\"_\")[contig_num_pos + 1]\n \n contig = contigs_dict[end_name[1]]\n if end_name[0] == \"five\":\n contig = contig.reverse_complement()\n nice_scaff += contig_number + \"r\"\n else:\n \tnice_scaff += contig_number + \"f\"\n seq = contig\n return seq, nice_scaff", "def translate(nuc):\n\tfrom Bio import Seq\n\ttry:\n\t\ttmp_aa = Seq.translate(nuc.replace('-','N')) #returns string when argument is a string, Bio.Seq otherwise\n\texcept:\n\t\tprint(\"translation failed\",nuc)\n\t\ttmp_aa = 'X'*len(nuc)//3\n\taa_seq = \"\"\n\tfor i,aa in enumerate(tmp_aa):\n\t\tif nuc[i*3:(i+1)*3]=='---':\n\t\t\taa_seq+='-'\n\t\telse:\n\t\t\taa_seq+=aa\n\treturn aa_seq", "def test_parse_fasta_title_07():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title', 'mixed_ends', '')\n assert seq_name == 'title'\n assert seq_end == ''", "def dna_to_protein(seq):\n\n # Verify a convertible sequence\n if len(seq) % 3 != 0:\n raise RuntimeError('Total number of bases must be a multiple of 3')\n\n # Iterate through adding the proteins\n protein = ''\n for i in range(0, len(seq), 3):\n protein += bioinfo_dicts.codons[seq[i:i+3]]\n return protein", "def create_string(start_index):\n return [NOTES_SHARP[(x + start_index) % 12] for x in range(0, 12)]", "def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = seq_split[i]\n\n # break up the intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added", "def _make_stamp():\n from random import choice\n from string import hexdigits\n\n length = 8\n return ''.join(choice(hexdigits) for m in range(length))", "def _get_seq_string(sequence) -> str:\n final_str = \"\"\n for seq in [sequence[i:i+60] for i in range(0, len(sequence), 60)]:\n final_str += \" \" + \" \".join(seq[i:i+10] for i in range(0, len(seq), 10)) + \"\\n\"\n\n return final_str", "def test_wrong_sequence(self):\n date = datetime(2016, 11, 12)\n seq = 31\n with self.assertRaises(ValueError):\n star_barcode.barcode_filename(date, seq)", "def create_DESeqRscript_replicates(infile=\"/projects/dowellde/groseq/data/replicates/gffcoverage/set1andset2.coverage.protein_coding\",columns=\"[10, 15, 11, 14]\", type_transcript=\"gffcoverage\", conditions=\"['DMS0', 'DMSO', 'Nutlin', 'Nutlin']\", condition1=\"DMSO\", condition2=\"Nutlin\",title_of_names_column=\"group\"):\n\n f = open(infile)\n headers = f.readline()\n headers = 
headers.strip(\"\\n\")\n headers = headers.split(\"\\t\")\n f.close()\n infile_dir = infile.split(\"/\")[:-1]\n infile_dir = \"/\".join(infile_dir)+\"/\"\n infile_root = infile.split(\"/\")[-1].strip(\".txt\")\n\tset_conditions = set(eval(conditions))\n\tset_conditions = list(set_conditions)\n outfile = infile_dir+infile_root+\".\"+condition1+condition2+type_transcript\n write_file = outfile+\".R\"\n print write_file\n wf = open(write_file ,\"w\")\n R_dump_file = outfile+\".Rout\"\n graph_file = outfile+\".png\"\n outfileallinputs = outfile+\".res.txt\"\n outfilesig = outfile+\".resSig.txt\"\n outfilesig_orderpval = outfile+\".resSig_pvalue.txt\"\n wf.write('sink(\"'+R_dump_file+'\")\\n')\n wf.write('library( DESeq )\\n')\n wf.write('data <- read.delim(\"'+infile+r'\", sep=\"\\t\", header=TRUE)'+\"\\n\")#need to check that \\t comes out like it should. Might write it wrong.\n\tcolumns_list = []\n\tcolumns = eval(columns)\n\tline = \", \".join(map(str,columns))\n wf.write('countsTable <- subset(data, select=c('+line+'))\\n')\n wf.write('rownames(countsTable) <- data$'+title_of_names_column+'\\n')\n\tconditions = eval(conditions)\n line = '\", \"'.join(conditions)\n wf.write('conds <- c(\"'+line+'\")\\n')\n wf.write('cds <- newCountDataSet( countsTable, conds )\\n')\n wf.write('cds <- estimateSizeFactors( cds )\\n')\n wf.write('sizeFactors(cds)\\n')\n wf.write(\"cds <- estimateDispersions( cds )\\n\")\n wf.write('res <- nbinomTest( cds, \"'+condition1+'\", \"'+condition2+'\" )\\n')\n wf.write('plotDE <- function( res ) plot(res$baseMean,res$log2FoldChange, log=\"x\", pch=20, cex=.1, col = ifelse( res$padj < .1, \"red\", \"black\" ) )\\n')\n wf.write(\"png('\"+graph_file+\"')\\n\")\n wf.write('plotDE( res )\\n')\n wf.write('dev.off()\\n')\n wf.write('resSig <- res[ res$padj < .1, ]\\n')\n wf.write('write.table(res, file = \"'+outfileallinputs+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('write.table(resSig, file = \"'+outfilesig+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('write.table(resSig[ order(resSig$pval), ], file = \"'+outfilesig_orderpval+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('sink()\\n')", "def test_parse_fasta_title_11():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title_1', 'single_ends', '')\n assert seq_name == 'title_1'\n assert seq_end == ''", "def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf", "def generate_sequence(sequence):\n if sequence[len(sequence) - 1] == 'Z':\n return sequence + \"A\"\n\n s = SeqGen(26, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n\n if sequence[0].isdigit() and sequence[0] == '9':\n sequence = list(sequence)\n sequence[0] = 'A'\n sequence = ''.join(sequence)\n elif sequence[0] == 'Z':\n sequence = list(sequence)\n sequence[0] = '0'\n sequence = ''.join(sequence)\n sequence = sequence[0] + increment(sequence[1:], s)\n else:\n sequence = list(sequence)\n sequence[0] = (chr(ord(sequence[0]) + 1))\n sequence = ''.join(sequence)\n return sequence", "def test_parse_fasta_title_04():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title/2 after', 'end_2', '2')\n assert seq_name == 'title'\n assert seq_end == '2'" ]
[ "0.6997066", "0.634367", "0.6063364", "0.55118203", "0.5302438", "0.5274324", "0.5145185", "0.5130142", "0.49437207", "0.49163416", "0.48809633", "0.48788667", "0.485482", "0.48448265", "0.48089215", "0.4794429", "0.47890186", "0.47434813", "0.4631714", "0.45985904", "0.4581076", "0.4578092", "0.45579717", "0.45489904", "0.45449865", "0.45411876", "0.45266455", "0.45229295", "0.45189008", "0.45142096" ]
0.6875425
1
construct_postscript must not raise for sequences 00-09. As all possible sequences run 00-99, they can be adequately represented by an integer rather than a string. But the construct_postscript function must not raise for integers 0-9, even though as (unformatted) strings they are of length 1.
def test_sequence_0_to_9(self): seqs = list(range(10)) for seq in seqs: with self.subTest(seq=seq): result = star_barcode.construct_postscript( sequence=seq, bwipp_location=self.bwipp, issn=self.issn, week=20, header_line='' ) self.assertGreater( result.find(f'{self.issn} {seq:02}'), -1 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_typical(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n issn_args = ' '.join([self.issn, str(seq), str(week)])\n result = star_barcode.construct_postscript(\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )\n self.assertGreater(result.find(str(self.bwipp)), -1)\n self.assertGreater(result.find(issn_args), -1)\n self.assertGreater(result.find(header), -1)", "def test_sequence_outside_range(self):\n seqs = [-1, 100]\n for seq in seqs:\n with self.subTest(seq=seq):\n with self.assertRaisesRegex(ValueError, str(seq)):\n star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=46,\n header_line=''\n )", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def create_seqeunce_helper(i, text, dsl, char_count, char_to_n, extra, length = seq_length):\n\n seq_int = [] # Sequence mapped to integers\n output_seq = np.zeros((length, char_count)) # Output sequence which will become one item in input array \n\n # Get the next sequence and map its characters to integers\n for v in text[i * length + extra : (i + 1) * length + extra]:\n # If the seed_text is missing a character we append 0\n if v in char_to_n:\n seq_int.append(char_to_n[v])\n else:\n seq_int.append(0)\n\n # For character in sequence\n for j in range(length):\n # Set column corrpsonding to that character to 1\n output_seq[j][seq_int[j]] = 1.0 \n\n return output_seq", "def isPostscript(fmt):\n if fmt == 'POST' or fmt == 'PSCL' or fmt == 'PDF':\n return 1\n return 0", "def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )", "def ps2svg_string(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n path_style = \"fill:none;stroke:#000000;stroke-width:16;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1\"\n point_style = \"font-variant:normal;font-weight:normal;font-size:13.39669991px;font-family:Times;-inkscape-font-specification:Times-Roman;writing-mode:lr-tb;fill:#0000FF;fill-opacity:1;fill-rule:nonzero;stroke:none\"\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntro)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n point_info = []\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if 
result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 10)\n # Convert into path line\n sPathLine = '<path id=\"path{}\" style=\"{}\" d=\"M {},{} {},{}\" />'.format(\n idx, path_style, nums[0], nums[1], nums[2], nums[3])\n idx += 2\n lst_out.append(sPathLine)\n else:\n # We have exited the lines section\n section = \"point\"\n lst_out.append('<g transform=\"scale(10)\" id=\"g{}\">'.format(idx))\n idx += 2\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n\n # Is this the first point?\n if bFirstPoint:\n lst_out.append('<text id=\"text{}\" style=\"{}\" transform=\"matrix(1,0,0,-1,{},{})\">'.format(\n idx, point_style, nums[0], nums[1]))\n idx += 2\n oorsprong['x'] = float(nums[0])\n oorsprong['y'] = float(nums[1])\n bFirstPoint = False\n\n # In all situations: position w.r.t. oorsprong\n pos_x = \"{:.6f}\".format(float(nums[0]) - oorsprong['x']) \n pos_y = \"{:.6f}\".format(oorsprong['y'] - float(nums[1]) )\n point_info.append(pos_y)\n point_info.append(pos_x)\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n point_info.append(sLabel)\n\n # Output this label\n sLabel = '<tspan id=\"tspan{}\" y=\"{}\" x=\"{}\">{}</tspan>'.format(\n idx, pos_y, pos_x, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n point_info = []\n\n # Finish up the svg nicely\n lst_out.append(\" </text>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack", "def test_week_wrong(self):\n weeks = [0, 54]\n for week in weeks:\n with self.subTest(week=week):\n with self.assertRaisesRegex(ValueError, str(week)):\n star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=21,\n header_line=''\n )", "def ps2svg_simple(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n line_style = 'stroke:black;stroke-width:1'\n point_style = \"fill:blue;font-family:Times\"\n offset_y = 18 # Adding 18px to compensate for double mirroring\n min_y = width_simple\n min_x = height_simple\n max_y = 0\n max_x = 0\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntroSimple)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = 
\"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 1)\n # Convert into <line> element\n sLine = '<g id=line{}><line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\" style=\"{}\" stroke-linecap=\"round\" /></g>'.format(\n idx, nums[0], nums[1], nums[2], nums[3], line_style)\n idx += 2\n lst_out.append(sLine)\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]), float(nums[2]))\n min_y = min(min_y, float(nums[1]), float(nums[3]))\n max_x = max(max_x, float(nums[0]), float(nums[2]))\n max_y = max(max_y, float(nums[1]), float(nums[3]))\n else:\n # We have exited the lines section\n section = \"point\"\n\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n pos_x = \"{:.6f}\".format(float(nums[0])) \n pos_y = \"{:.6f}\".format(float(nums[1]) + offset_y )\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]))\n min_y = min(min_y, float(nums[1]))\n max_x = max(max_x, float(nums[0]))\n max_y = max(max_y, float(nums[1]))\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n\n # Output this label\n sLabel = '<g id=\"text{}\"><text y=\"{}\" x=\"{}\" style=\"{}\">{}</text></g>'.format(\n idx, pos_y, pos_x, point_style, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n\n # Finish up the svg nicely\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n\n # Adapt w.r.t. min_x and min_y, max_x, max_y\n fHeight = height_simple - 2 * min_y + offset_y\n sViewbox = 'viewBox=\"{} {} {} {}\" width=\"{}\" height=\"{}\"'.format(\n 0, min_y, width_simple, fHeight, width_simple, fHeight\n )\n sBack = sBack.replace('@viewbox', sViewbox)\n\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack", "def scriptNumBytes(n):\n if n == 0:\n return ByteArray()\n\n isNegative = n < 0\n if isNegative:\n n = -n\n\n result = ByteArray(length=9)\n i = 0\n while n > 0:\n result[i] = n & 0xFF\n n = n >> 8\n i += 1\n\n if result[i - 1] & 0x80 != 0:\n extraByte = 0x00\n if isNegative:\n extraByte = 0x80\n result[i] = extraByte\n i += 1\n elif isNegative:\n result[i - 1] |= 0x80\n\n return result[:i]", "def postamble(gcode, postamble):\r\n gcode.append('; <Start Postable> ')\r\n risePen(gcode)\r\n gcode+=postamble\r\n gcode.append('; <End Postable> ')", "def compile_script(s):\n f = io.BytesIO()\n for t in s.split():\n if t in OPCODE_TO_INT:\n f.write(bytes_from_int(OPCODE_TO_INT[t]))\n elif (\"OP_%s\" % t) in OPCODE_TO_INT:\n f.write(bytes_from_int(OPCODE_TO_INT[\"OP_%s\" % t]))\n else:\n if (t[0], t[-1]) == ('[', ']'):\n t = t[1:-1]\n if len(t) == 1:\n t = \"0\" + t\n if t[:2] == \"0x\":\n t = t[2:]\n t = binascii.unhexlify(t.encode(\"utf8\"))\n f.write(t)\n return f.getvalue()", "def create_DESeqRscript_replicates(infile=\"/projects/dowellde/groseq/data/replicates/gffcoverage/set1andset2.coverage.protein_coding\",columns=\"[10, 15, 11, 14]\", type_transcript=\"gffcoverage\", conditions=\"['DMS0', 'DMSO', 'Nutlin', 'Nutlin']\", condition1=\"DMSO\", condition2=\"Nutlin\",title_of_names_column=\"group\"):\n\n f = open(infile)\n headers = f.readline()\n headers = headers.strip(\"\\n\")\n headers = headers.split(\"\\t\")\n 
f.close()\n infile_dir = infile.split(\"/\")[:-1]\n infile_dir = \"/\".join(infile_dir)+\"/\"\n infile_root = infile.split(\"/\")[-1].strip(\".txt\")\n\tset_conditions = set(eval(conditions))\n\tset_conditions = list(set_conditions)\n outfile = infile_dir+infile_root+\".\"+condition1+condition2+type_transcript\n write_file = outfile+\".R\"\n print write_file\n wf = open(write_file ,\"w\")\n R_dump_file = outfile+\".Rout\"\n graph_file = outfile+\".png\"\n outfileallinputs = outfile+\".res.txt\"\n outfilesig = outfile+\".resSig.txt\"\n outfilesig_orderpval = outfile+\".resSig_pvalue.txt\"\n wf.write('sink(\"'+R_dump_file+'\")\\n')\n wf.write('library( DESeq )\\n')\n wf.write('data <- read.delim(\"'+infile+r'\", sep=\"\\t\", header=TRUE)'+\"\\n\")#need to check that \\t comes out like it should. Might write it wrong.\n\tcolumns_list = []\n\tcolumns = eval(columns)\n\tline = \", \".join(map(str,columns))\n wf.write('countsTable <- subset(data, select=c('+line+'))\\n')\n wf.write('rownames(countsTable) <- data$'+title_of_names_column+'\\n')\n\tconditions = eval(conditions)\n line = '\", \"'.join(conditions)\n wf.write('conds <- c(\"'+line+'\")\\n')\n wf.write('cds <- newCountDataSet( countsTable, conds )\\n')\n wf.write('cds <- estimateSizeFactors( cds )\\n')\n wf.write('sizeFactors(cds)\\n')\n wf.write(\"cds <- estimateDispersions( cds )\\n\")\n wf.write('res <- nbinomTest( cds, \"'+condition1+'\", \"'+condition2+'\" )\\n')\n wf.write('plotDE <- function( res ) plot(res$baseMean,res$log2FoldChange, log=\"x\", pch=20, cex=.1, col = ifelse( res$padj < .1, \"red\", \"black\" ) )\\n')\n wf.write(\"png('\"+graph_file+\"')\\n\")\n wf.write('plotDE( res )\\n')\n wf.write('dev.off()\\n')\n wf.write('resSig <- res[ res$padj < .1, ]\\n')\n wf.write('write.table(res, file = \"'+outfileallinputs+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('write.table(resSig, file = \"'+outfilesig+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('write.table(resSig[ order(resSig$pval), ], file = \"'+outfilesig_orderpval+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('sink()\\n')", "def test_multibyte_script(Script):\n code = \"import datetime; datetime.d\"\n comment = \"# multi-byte comment あいうえおä\"\n s = ('%s\\n%s') % (code, comment)\n assert len(Script(s).complete(1, len(code)))", "def store_script(script):\n # I p1 script length\n # I p2 0\n ## extension ##\n # s script\n\n return _u2i(_pigpio_command_ext(\n _control, _PI_CMD_PROC, len(script), 0, script))", "def postscript(self):\n self.g.postscript_output(fileName='tmp2.ps',decorations='no')", "def push_script(data: str) -> str:\n data = bfh(data)\n from .opcode import opcodes\n\n data_len = len(data)\n\n # \"small integer\" opcodes\n if data_len == 0 or data_len == 1 and data[0] == 0:\n return bh2u(bytes([opcodes.OP_0]))\n elif data_len == 1 and data[0] <= 16:\n return bh2u(bytes([opcodes.OP_1 - 1 + data[0]]))\n elif data_len == 1 and data[0] == 0x81:\n return bh2u(bytes([opcodes.OP_1NEGATE]))\n\n return op_push(data_len) + bh2u(data)", "def create_DESeqRscript_no_replicates(infile=\"/projects/dowellde/groseq/data/set1/clipped_fastqM10/samfiles/sortedbamfiles/lncRNAs/compare_cov_fileless1neg_istead.txt\",column1=11, column2=14, type_transcript=\"lncRNAs\", condition1=\"DMS0\", condition2=\"Nutlin\", title_of_names_column=\"name\", order_flip=\"N\"):\n\n\tf = open(infile)\n\theaders = f.readline()\n\theaders = headers.strip(\"\\n\")\n\theaders = headers.split(\"\\t\")\n\tf.close()\n\tinfile_dir = 
infile.split(\"/\")[:-1]\n\tinfile_dir = \"/\".join(infile_dir)+\"/\"\n\tinfile_root = infile.split(\"/\")[-1].strip(\".txt\")\n\theadercondition1 = headers[column1-1]#adjust for the fact python starts counting with 0 and R with 1\n\theadercondition2 = headers[column2-1]#adjust for the fact python starts counting with 0 and R with 1\n\tif order_flip==\"N\":\n\t\toutfile = infile_dir+infile_root+\".\"+headercondition1+headercondition2+type_transcript\n\telse:\n\t\toutfile = infile_dir+infile_root+\".\"+headercondition2+headercondition1+type_transcript\n\twrite_file = outfile+\".R\"\n\tprint write_file\n\twf = open(write_file ,\"w\")\n\tR_dump_file = outfile+\".Rout\"\n\tgraph_file = outfile+\".png\"\n\toutfileallinputs = outfile+\".res.txt\"\n\toutfilesig = outfile+\".resSig.txt\"\n\toutfilesig_orderpval = outfile+\".resSig_pvalue.txt\"\n\twf.write('sink(\"'+R_dump_file+'\")\\n')\n\twf.write('library( DESeq )\\n')\n\twf.write('data <- read.delim(\"'+infile+r'\", sep=\"\\t\", header=TRUE)'+\"\\n\")#need to check that \\t comes out like it should. Might write it wrong.\n\twf.write('countsTable <- subset(data, select=c('+str(column1-1)+','+str(column2-1)+'))\\n')\n\twf.write('rownames(countsTable) <- data$'+title_of_names_column+'\\n')\n\twf.write('conds <- c(\"'+condition1+'\", \"'+condition2+'\")\\n')\n\twf.write('cds <- newCountDataSet( countsTable, conds )\\n')\n\twf.write('cds <- estimateSizeFactors( cds )\\n')\n\twf.write('sizeFactors(cds)\\n')\n\twf.write(\"cds <- estimateDispersions( cds, method='blind', sharingMode='fit-only' )\\n\")\n\tif order_flip==\"N\":\n\t\twf.write('res <- nbinomTest( cds, \"'+condition1+'\", \"'+condition2+'\" )\\n')\n\telse:\n\t\twf.write('res <- nbinomTest( cds, \"'+condition2+'\", \"'+condition1+'\" )\\n')\n\twf.write('plotDE <- function( res ) plot(res$baseMean,res$log2FoldChange, log=\"x\", pch=20, cex=.1, col = ifelse( res$padj < .1, \"red\", \"black\" ) )\\n')\n\twf.write(\"png('\"+graph_file+\"')\\n\")\n\twf.write('plotDE( res )\\n')\n\twf.write('dev.off()\\n')\n\twf.write('resSig <- res[ res$padj < .1, ]\\n')\n\twf.write('write.table(res, file = \"'+outfileallinputs+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n\twf.write('write.table(resSig, file = \"'+outfilesig+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n\twf.write('write.table(resSig[ order(resSig$pval), ], file = \"'+outfilesig_orderpval+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n\twf.write('sink()\\n')", "def backtranslate(p_seq, n_seq):\r\n # Keep track of the new sequence. 
Also keep track of which codon we are\r\n # actually processing (gaps don't count)\r\n newseq = ''\r\n codon = 0\r\n for aa in p_seq:\r\n if aa == '-':\r\n newseq += '---'\r\n else:\r\n newseq += n_seq[codon*3:(codon*3) + 3]\r\n codon += 1\r\n return newseq", "def test_script_to_fs_script_too_long():\n script = (b'shouldfit' * 3023)[:-1]\n _ = uflash.script_to_fs(script, uflash._MICROBIT_ID_V1)\n\n script += b'1'\n with pytest.raises(ValueError) as ex:\n _ = uflash.script_to_fs(script, uflash._MICROBIT_ID_V1)\n assert 'Python script must be less than' in ex.value.args[0]", "def dna_number(bp_seq):\r\n # Hint: use dna_digit\r\n\r\n # YOUR CODE HERE\r", "def test201b(self):\n self.spawn(\"./binary\").stdin(\"0\").stdin(\"2\").stdin(\"201\").stdout(\"11001001\\n\").exit(0)", "def _create_superscript_mapping():\n # 2 & 3 have different unicode superscript translations, so\n # we need to manually create different cases for them.\n # Also, 1 needs to be manually added with a different case.\n two_and_three = [2, 3]\n all_other_normal_nums = [0, *[i for i in range(4, 10)]]\n\n # Create the unicode superscripts for each of them.\n unicode_superscripts = [\n chr(0x2070 + i) for i in all_other_normal_nums]\n unicode_superscripts.extend(\n [chr(0x00B0 + i) for i in two_and_three])\n unicode_superscripts.append(chr(0x00B9))\n\n # Sort the list.\n normal, unicode = zip(*sorted(zip(\n [*all_other_normal_nums, *two_and_three, 1],\n unicode_superscripts)))\n\n # Convert the normal digits to strings.\n normal = [str(i) for i in normal]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal, unicode))", "def test_week_in_range(self):\n weeks = list(range(1, 54))\n seq = 21\n for week in weeks:\n with self.subTest(week=week):\n result = star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02} {week:02}'),\n -1\n )", "def test_parse_fasta_title_09():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title 2 after', 'mixed_ends', '')\n assert seq_name == 'title'\n assert seq_end == '2'", "def create_complete_assembly_code(formula: str) -> str:\n asm = \"org 100h\\n\"\n asm += infix_to_assembly(formula)\n asm += \"\"\"\\n\npop bx ; number to write to stdout\nmov ch, 02h ; counter for bytes to to print\n\nprint_routine:\n cmp ch, 0h\n je end_print_routine ; while bytes to print > 0\n \n cmp ch, 01h ; check if first or second byte, 02h is the first byte to print\n je get_second_byte\n get_first_byte:\n mov ah, 0h\n mov al, bh\n dec ch\n jmp print_byte\n get_second_byte:\n mov ah, 0h\n mov al, bl\n dec ch\n \n print_byte: ; do not alter registers bx, ch in here. 
ax carries the byte to print\n ; as a single byte will cover 2 digits from 0x00 to 0xff on the screen we will loop twice\n ; and use integer division to get the most and least significant bits\n mov dl, 010h\n div dl\n ; xor swap (remainder, division) -> (division, remainder)\n xor ah, al\n xor al, ah\n xor ah, al\n \n mov cl, 02h ; loop counter for the higher and lower 4 bits of register a\n loop:\n cmp cl, 01h ; 02h -> print ah, 01h -> print al\n je print_al\n print_ah:\n mov dl, ah\n jmp print_digit\n print_al:\n mov dl, al\n \n print_digit:\n cmp dl, 0ah ; to convert byte into human-readable character\n jge number_to_ascii_lowercase_letter\n number_to_ascii_number:\n add dl, 30h\n jmp to_stdout\n number_to_ascii_lowercase_letter:\n add dl, 87\n\n to_stdout:\n mov ah, 02h ; int 21h/02h for writing character to standard output\n mov dh, al\n int 21h ; the 2 lines before and after is to keep al's value as interrupt alters it\n mov al, dh\n dec cl\n jnz loop\n jmp print_routine\n \nend_print_routine:\n int 20h\n\"\"\"\n return asm", "def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))", "def make_dt_ti_ptn(text):\n ptn = re.sub(r'\\s+', '', text) # remove spaces\n return re.sub(r'\\d', '0', ptn) # replace all numbers with zero", "def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library 
&& import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script", "def create_string(start_index):\n return [NOTES_SHARP[(x + start_index) % 12] for x in range(0, 12)]" ]
[ "0.62206656", "0.6154193", "0.60054547", "0.50962126", "0.4953612", "0.49484235", "0.49279657", "0.49098706", "0.48882347", "0.4887555", "0.48665893", "0.48258013", "0.4716987", "0.47128314", "0.46274677", "0.45913213", "0.4562817", "0.45471853", "0.4519758", "0.450454", "0.4449535", "0.4441703", "0.44310609", "0.4429388", "0.4381328", "0.43719307", "0.43644878", "0.43315473", "0.43248233", "0.43235675" ]
0.6519479
0
construct_postscript raises ValueError unless 0 < week < 54; ISO weeks must be between 1 and 53.
def test_week_wrong(self): weeks = [0, 54] for week in weeks: with self.subTest(week=week): with self.assertRaisesRegex(ValueError, str(week)): star_barcode.construct_postscript( week=week, bwipp_location=self.bwipp, issn=self.issn, sequence=21, header_line='' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_week_in_range(self):\n weeks = list(range(1, 54))\n seq = 21\n for week in weeks:\n with self.subTest(week=week):\n result = star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02} {week:02}'),\n -1\n )", "def test_typical(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n issn_args = ' '.join([self.issn, str(seq), str(week)])\n result = star_barcode.construct_postscript(\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )\n self.assertGreater(result.find(str(self.bwipp)), -1)\n self.assertGreater(result.find(issn_args), -1)\n self.assertGreater(result.find(header), -1)", "def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def __init__(self, y, w):\n for d in xrange(-10, 370):\n date = datetime.date(y, 1, 1) + datetime.timedelta(d)\n if date.isocalendar() == (y, w, 1):\n date_a = date\n break\n else:\n raise ValueError(\"Invalid week\")\n date_b = date_a + datetime.timedelta(7)\n super(Week, self).__init__(date_a, date_b)", "def test_sequence_outside_range(self):\n seqs = [-1, 100]\n for seq in seqs:\n with self.subTest(seq=seq):\n with self.assertRaisesRegex(ValueError, str(seq)):\n star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=46,\n header_line=''\n )", "def weekly():", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def process(raw):\n #global weekNum\n field = None\n entry = {}\n cooked = []\n number = -1\n\n for line in raw:\n log.debug(\"Line: {}\".format(line))\n line = line.strip()\n if len(line) == 0 or line[0] == \"#\":#if # is the first character, skip\n log.debug(\"Skipping\")\n continue\n parts = line.split(':')#split lines to before and after \":\"\n if len(parts) == 1 and field:#adds additional content to whatever the previously used field is\n entry[field] = entry[field] + line + \" \" \n continue\n if len(parts) == 2:#if there are 2 parts, the field is the first part and the content is the second part\n field = parts[0]\n content = parts[1]\n else:#if none of the above are correct there is an issue\n raise ValueError(\"Trouble with line: '{}'\\n\".format(line) +\n \"Split into |{}|\".format(\"|\".join(parts)))\n\n if field == \"begin\":#checking if this is the line with the start date\n try:#begin only triggers once (at least it should only trigger once)\n base = arrow.get(content, \"MM/DD/YYYY\")#get the date as an object named \"base\", will need to use this to determine start date and current week, arrow must have a \"current date\"?\n # base is the \"week 1\" date, DD = 1, DD + 7 = 2, DD + 14 = 3, DD + 21 = 4, etc\n #now i will make variables for the start date of each week, or find a way to take the difference between 2 dates\n #end = base#arrow.get(base, \"MM/DD/YYYY\")\n #end = 
end.shift(weeks=+10)\n #today = arrow.now()\n #today.format(\"MM/DD/YYYY\")\n #if today == base:\n # weekNum = 1\n #number = -1\n \"\"\"weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n if today > weeks[10]:\n number = 10\n elif today < weeks[0]:\n number = 0\n #base = arrow.format(\"MM/DD/YYYY\")\n else:\n raise ValueError(\"Big error calculating week\")\n #for index in range(1,70):\n # base = base.shift(days=+1)\n # if today == base:\n # weekNum = weekNum + (index % 7)\n # break \n base = base.format(\"MM/DD/YYYY\")\"\"\"\n except:\n raise ValueError(\"Unable to parse date {}\".format(content))#date is incorrectly formatted, should be MM/DD/YYYY\n #now I need to check if either of these weeks is the current week\n# for r in arrow.Arrow.span_range('day',\n elif field == \"week\":#this is the week number\n if entry:\n cooked.append(entry)\n entry = {}#make entry empty again\n #if content == currentWeekNum:\n #print(\"Content: \" + content)\n #print(\"Week Number: \" + currentWeekNum + \"\\n\")\n #print(\"Is Current Week?\" + currentWeekBool + \"\\n\")\n # currentWeekBool = True\n entry['topic'] = \"\"#these are all \"classes\" in the HTML document\n entry['project'] = \"\"\n entry['week'] = content#put the week number into the \"week\" field in the html document\n #entry['isCurrentWeek'] = currentWeekBool\n #currentWeekBool = False\n #if content == weekNum:\n # entry['bool'] = True\n #else:\n # entry['bool'] = True\n \"\"\"if \n if content == currentWeekNum:\n entry['isCurrentWeek'] = True\n else:\n entry['isCurrentWeek'] = False\"\"\"\n\n elif field == 'topic' or field == 'project':#from if len == 2, set the entry for the field to the content in the html doc\n entry[field] = content\n\n else:\n raise ValueError(\"Syntax error in line: {}\".format(line))\n #entryn = entry + \"\\n\"\n\t#cookedn = cooked + \"\\n\"\n\t#fieldn = field + \"\\n\"\n\t#print(\"Entry: \" + entryn)\n #print(\"Cooked: \" + cookedn)\n #print(\"Field: \" + fiieldn)\n if entry:#appends whatever added stuff to the whole docuemnt\n cooked.append(entry)\n\t#returns formatted document after it has been looped throughi\n #number = getWeekNum(raw)\n weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n return [cooked, i+1]\n if today < weeks[0]:\n number = 0\n else:\n number = 10\n return [cooked, number]", "def _normalize_publication_datetime(self, article_publication_datetime):\n datetime_parsed = dateparser.parse(article_publication_datetime)\n week_start = datetime_parsed - datetime.timedelta(\n days=datetime_parsed.weekday()\n )\n week_end = week_start + datetime.timedelta(days=6)\n\n return (\n week_start.strftime('%d-%m-%Y'),\n week_end.strftime('%d-%m-%Y')\n )", "def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day", "def test_sequence_0_to_9(self):\n seqs = list(range(10))\n for seq in seqs:\n with self.subTest(seq=seq):\n result = 
star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=20,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02}'),\n -1\n )", "def run_preprocessor():\n\n prepro = PP()\n df = prepro.load_data()\n\n df.subject_ch = df.subject_ch.map(lambda x: convert_emoticons(str(x)))\n\n df = prepro.remove_html_tags()\n\n\n df = remove_unwanted_columns(df)\n df[\"weeks\"] = df['created_ch'].dt.week\n\n # remove characteres\n df = prepro.normalization()\n\n # run tokenizer\n df = prepro.tokenizations()\n\n # remove characteres\n df = prepro.stop_word_remover()\n\n # remove characteres\n df = prepro.stemming_lemmatization()\n\n return df\n\n\n\n\n # def replace_week_numbers(df):\n # \"\"\"\n # functiion that change week number from 1 to 19\n # \"\"\"\n \n # return sorted(df.weeks.unique())\n\n # df[\"weeks_num\"] = df[\"weeks\"].map(lambda x: replace_week_numbers(df).index(x)+1 if(x in replace_week_numbers(df)) else np.nan)", "def GetWeekString(self, basic=False, truncation=NoTruncation):\n century, decade, year, week, day = self.GetWeekDay()\n if day is None:\n if week is None:\n # same as the calendar string\n return self.GetCalendarString(basic, truncation)\n else:\n if truncation == NoTruncation:\n if basic:\n return \"%02i%i%iW%02i\" % (century, decade, year, week)\n else:\n return \"%02i%i%i-W%02i\" % (century, decade, year, week)\n elif truncation == Truncation.Century:\n if basic:\n return \"%i%iW%02i\" % (decade, year, week)\n else:\n return \"%i%i-W%02i\" % (decade, year, week)\n elif truncation == Truncation.Decade:\n if basic:\n return \"-%iW%02i\" % (year, week)\n else:\n return \"-%i-W%02i\" % (year, week)\n elif truncation == Truncation.Year:\n return \"-W%02i\" % week\n else:\n raise ValueError\n else:\n if truncation == NoTruncation:\n if basic:\n return \"%02i%i%iW%02i%i\" % (\n century, decade, year, week, day)\n else:\n return \"%02i%i%i-W%02i-%i\" % (century,\n decade,\n year,\n week,\n day)\n elif truncation == Truncation.Century:\n if basic:\n return \"%i%iW%02i%i\" % (decade, year, week, day)\n else:\n return \"%i%i-W%02i-%i\" % (decade, year, week, day)\n elif truncation == Truncation.Decade:\n if basic:\n return \"-%iW%02i%i\" % (year, week, day)\n else:\n return \"-%i-W%02i-%i\" % (year, week, day)\n elif truncation == Truncation.Year:\n if basic:\n return \"-W%02i%i\" % (week, day)\n else:\n return \"-W%02i-%i\" % (week, day)\n elif truncation == Truncation.Week:\n return \"-W-%i\" % day\n else:\n raise ValueError", "def test_standard_seq_week(self):\n date = datetime(2016, 11, 15)\n prices = [2] * 7\n expected_sequence = 22\n expected_week = 46\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def test_weeks(self):\n d = datetime(2014, 1, 29)\n eq_(week_start(d), datetime(2014, 1, 27, 0, 0, 0))\n eq_(week_end(d), datetime(2014, 2, 2, 23, 59, 59))", "def test_weeks():\n assert_equal(datetime.timedelta(days=7), convert_delta(\"1w\"))", "def get_yearweek(yearweekstr: str) -> tuple:\n return tuple(map(int, yearweekstr.split('-W')))", "def CC_wdw(self):\n # Setup param\n loc = 'TSdata'\n if 'single' == self.newParam['survey_type']:\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc).shape[0]\n elif 'multiple' == self.newParam['survey_type']:\n TS_group = dt.utilities.DB_group_names(self.Database, group_name = loc)[0]\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc+'/'+TS_group).shape[0]\n\n param = 
self.newParam\n\n # Assign TS processing length to end_wdws if given\n if param['end_wdws']:\n TS_sig_len = param['end_wdws']\n else:\n TS_sig_len = TS_len\n\n ERROR_MESSAGE = 'The length of a TS signal to be processed is', TS_sig_len, \\\n 'which is < end of the last window'\n\n # Calculate wdwPos for overlapping windows of ww_ol if wdwPos is False\n if param['wdwPos'][0] is False:\n # Error checks\n if TS_sig_len < self.newParam['ww'][0]:\n raise Warning(ERROR_MESSAGE)\n\n wdwStep = np.floor(param['ww'][0] *\n (100 - param['ww_ol']) / 100)\n\n if self.verbose: print('* Length fo TSdata', TS_len)\n\n max_wdwPos = TS_sig_len - param['ww'][0] + 1\n wdwStarts = np.arange(0 + param['sta_wdws'], max_wdwPos, wdwStep).astype(int)\n\n if self.verbose: print('* The step in window potions is %s sample points' % wdwStep)\n if self.verbose: print('* The max window postions is %s sample points'% max_wdwPos)\n\n param['wdwPos'] = [ [wdw_start, wdw_start + param['ww'][0]] for\n wdw_start in wdwStarts ]\n\n # Only update wdwPos structure if not already done so\n elif np.array(param['wdwPos'][0]).shape == ():\n param['wdwPos'] = [ [wdw_start, wdw_start + ww] for wdw_start,ww in\n zip(param['wdwPos'], param['ww'])]\n\n self.newParam['wdwPos'] = param['wdwPos']", "def build_plan(num_weeks, weekly_mileage, days_first_week, days_last_week):\r\n plan = []\r\n spread = 2\r\n if num_weeks > 1:\r\n plan.append(split_week(days_first_week, weekly_mileage[0], spread)) \r\n for week in range(1, num_weeks - 1):\r\n plan.append(split_week(7, weekly_mileage[week], spread))\r\n plan.append(split_week(days_last_week - 1, weekly_mileage[-1], spread)\r\n + [26.2])\r\n return plan", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def CONST_WEEK_TIMESTAMP() -> int:\n return 604800", "def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)", "def add_taper(plan, num_days, days_last_week):\r\n taper_vals = [5,3,1]\r\n if num_days >= 2:\r\n if days_last_week >= 2:\r\n plan[-1][-2] = taper_vals[-1]\r\n else:\r\n plan[-2][-1] = taper_vals[-1]\r\n if num_days >= 3:\r\n if days_last_week >= 3:\r\n plan[-1][-3] = taper_vals[-2]\r\n elif days_last_week == 2:\r\n plan[-2][-1] = taper_vals[-2]\r\n else:\r\n plan[-2][-2] = taper_vals[-2]\r\n if num_days >= 4:\r\n if days_last_week >= 4:\r\n plan[-1][-4] = taper_vals[-3]\r\n elif days_last_week == 3:\r\n plan[-2][-1] = taper_vals[-3]\r\n elif days_last_week == 2:\r\n plan[-2][-2] = taper_vals[-3]\r\n else:\r\n plan[-2][-3] = taper_vals[-3]\r\n return plan", "def test_interval_to_seconds_with_weeks(self):\n self.assert_interval_to_seconds(0, \"0w\", \"0week\", \"0weeks\")\n self.assert_interval_to_seconds(604800, \"1w\", \"1week\", \"1weeks\")\n self.assert_interval_to_seconds(4 * 604800, \"4w\", \"4week\", \"4weeks\")\n self.assert_interval_to_seconds(\n 123 * 604800, \"123w\", \"123week\", \"123weeks\")\n self.assert_interval_to_seconds(\n 12 * 604800, \"012w\", \"012week\", \"012weeks\")", "def ParseWeeklyChart(self, html, week):\n print 'Parsing chart for week of %s' % week\n chart = []\n soup = BeautifulSoup(html)\n table = soup.findAll('table')[3]\n table_rows = table.findAll('tr')[3:]\n for tr in table_rows:\n row = {}\n cols = tr.findAll('td')\n # Check whether the first cell in the row has a colspan attribute,\n # in which case we've reached the end of the table.\n try:\n cols[0]['colspan']\n break\n except KeyError:\n pass\n title = cols[2].text\n title = title.replace('\\'', '\\'\\'') # Escape single quotes.\n row['title'] = title\n 
link = cols[2].find('a')\n m = re.match('.*id=(?P<id>.*)\\.htm.*', str(link).lower())\n row['id'] = m.group('id')\n row['studio'] = cols[3].text\n row['gross'] = re.sub('[^\\d\\.]', '', cols[4].text)\n row['theaters'] = re.sub('[^\\d]', '', cols[6].text)\n row['budget'] = re.sub('[^\\d]', '', cols[10].text) or 'NULL'\n row['week'] = week\n self.InsertChartRow(row)", "def get_week_from_datestr(datestr: str) -> int:\n return date.fromisoformat(datestr).isocalendar()[1]", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def test_weekly_resolution_hindcast(daily_initialized, daily_obs):\n weekly_hindcast = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_hindcast.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_hindcast, weekly_obs).all()", "def workbookToSqlStatements(workbook,table_name,sheet_name_0,sheet_name_splitter,sheet_name_1,cols_name,ints,length_varchar,min_row):\n create_sql_statement = \"\"\n create_sql_statement = f\"CREATE TABLE IF NOT EXISTS {table_name} \"\n create_sql_statement += f\"(id SERIAL PRIMARY KEY , {sheet_name_0} INT, {sheet_name_1} INT,\"\n cols = str(cols_name).strip(\"]\")\n cols = cols.strip(\"[\")\n for i in range(len(cols_name)):\n cols = cols.replace(\"'\",\"\")\n cols = cols.replace(\"'\",\"\")\n if i in ints:\n create_sql_statement += f\"{cols_name[i]} INT\"\n else:\n create_sql_statement += f\"{cols_name[i]} CHAR({length_varchar})\"\n\n if i != len(cols_name) - 1:\n create_sql_statement += \",\"\n create_sql_statement += \");\"\n\n\n insert_sql_statement = \"\"\n insert_sql_statement = f\"insert into {table_name} ({sheet_name_0},{sheet_name_1},{cols}) values \"\n weeks_inserted = 0\n\n for sheet in workbook.worksheets:\n sheet_name_list = str(sheet.title).split(sheet_name_splitter)\n year = sheet_name_list[0]\n week = sheet_name_list[1]\n weeks_inserted += 1\n for row in sheet.iter_rows(min_row=min_row,values_only=True):\n values = \"(\"\n values += f\"{year},{week},\"\n insert = True\n for i in range(len(cols_name)):\n try:\n if i in ints:\n\n cell = str(row[i])\n try:\n values += f\"{int(cell)}\"\n except:\n values += \"null\"\n else:\n if str(row[i]) != \"\" and str(row[i]) != \"None\":\n values += f\"'{str(row[i]).rstrip()}'\"\n else:\n insert = False\n\n if i != len(cols_name) - 1:\n values += \",\"\n else:\n break\n except:\n return print(\"error the length of the list cols_name exeed the length of max columns of the sheet\")\n\n values += \")\"\n\n if insert:\n insert_sql_statement += values + \",\\n\"\n temp = len(insert_sql_statement)\n insert_sql_statement = insert_sql_statement[:temp - 2]\n insert_sql_statement += \";\"\n\n #insert_sql_statement = insert_sql_statement.replace(\",\\n;\",\"\\n;\")\n return str(weeks_inserted), create_sql_statement, insert_sql_statement" ]
[ "0.6856618", "0.59355134", "0.5696191", "0.5515064", "0.5420971", "0.515004", "0.5008384", "0.49796438", "0.48173553", "0.4778165", "0.47201556", "0.47167677", "0.47131327", "0.47100455", "0.46958074", "0.46758375", "0.46508726", "0.45938125", "0.4555849", "0.45406747", "0.4535208", "0.45237345", "0.44788688", "0.44786444", "0.44726822", "0.44724128", "0.44398558", "0.44204256", "0.44189864", "0.4416933" ]
0.7138101
0
construct_postscript accepts week >= 1, <= 53
def test_week_in_range(self): weeks = list(range(1, 54)) seq = 21 for week in weeks: with self.subTest(week=week): result = star_barcode.construct_postscript( week=week, bwipp_location=self.bwipp, issn=self.issn, sequence=seq, header_line='' ) self.assertGreater( result.find(f'{self.issn} {seq:02} {week:02}'), -1 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekly():", "def test_week_wrong(self):\n weeks = [0, 54]\n for week in weeks:\n with self.subTest(week=week):\n with self.assertRaisesRegex(ValueError, str(week)):\n star_barcode.construct_postscript(\n week=week,\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=21,\n header_line=''\n )", "def __init__(self, y, w):\n for d in xrange(-10, 370):\n date = datetime.date(y, 1, 1) + datetime.timedelta(d)\n if date.isocalendar() == (y, w, 1):\n date_a = date\n break\n else:\n raise ValueError(\"Invalid week\")\n date_b = date_a + datetime.timedelta(7)\n super(Week, self).__init__(date_a, date_b)", "def test_typical(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n issn_args = ' '.join([self.issn, str(seq), str(week)])\n result = star_barcode.construct_postscript(\n bwipp_location=self.bwipp,\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )\n self.assertGreater(result.find(str(self.bwipp)), -1)\n self.assertGreater(result.find(issn_args), -1)\n self.assertGreater(result.find(header), -1)", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def day_of_the_week(arg):", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def day_07_b() -> int:\n return 0", "def CONST_WEEK_TIMESTAMP() -> int:\n return 604800", "def do_upw(self, arg):\n self.do_timesheet('update week')", "def process(raw):\n #global weekNum\n field = None\n entry = {}\n cooked = []\n number = -1\n\n for line in raw:\n log.debug(\"Line: {}\".format(line))\n line = line.strip()\n if len(line) == 0 or line[0] == \"#\":#if # is the first character, skip\n log.debug(\"Skipping\")\n continue\n parts = line.split(':')#split lines to before and after \":\"\n if len(parts) == 1 and field:#adds additional content to whatever the previously used field is\n entry[field] = entry[field] + line + \" \" \n continue\n if len(parts) == 2:#if there are 2 parts, the field is the first part and the content is the second part\n field = parts[0]\n content = parts[1]\n else:#if none of the above are correct there is an issue\n raise ValueError(\"Trouble with line: '{}'\\n\".format(line) +\n \"Split into |{}|\".format(\"|\".join(parts)))\n\n if field == \"begin\":#checking if this is the line with the start date\n try:#begin only triggers once (at least it should only trigger once)\n base = arrow.get(content, \"MM/DD/YYYY\")#get the date as an object named \"base\", will need to use this to determine start date and current week, arrow must have a \"current date\"?\n # base is the \"week 1\" date, DD = 1, DD + 7 = 2, DD + 14 = 3, DD + 21 = 4, etc\n #now i will make variables for the start date of each week, or find a way to take the difference between 2 dates\n #end = base#arrow.get(base, \"MM/DD/YYYY\")\n #end = end.shift(weeks=+10)\n #today = arrow.now()\n #today.format(\"MM/DD/YYYY\")\n #if today == base:\n # weekNum = 1\n #number = -1\n \"\"\"weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n if today > weeks[10]:\n number = 10\n elif today < weeks[0]:\n number = 0\n #base = arrow.format(\"MM/DD/YYYY\")\n else:\n raise ValueError(\"Big error calculating 
week\")\n #for index in range(1,70):\n # base = base.shift(days=+1)\n # if today == base:\n # weekNum = weekNum + (index % 7)\n # break \n base = base.format(\"MM/DD/YYYY\")\"\"\"\n except:\n raise ValueError(\"Unable to parse date {}\".format(content))#date is incorrectly formatted, should be MM/DD/YYYY\n #now I need to check if either of these weeks is the current week\n# for r in arrow.Arrow.span_range('day',\n elif field == \"week\":#this is the week number\n if entry:\n cooked.append(entry)\n entry = {}#make entry empty again\n #if content == currentWeekNum:\n #print(\"Content: \" + content)\n #print(\"Week Number: \" + currentWeekNum + \"\\n\")\n #print(\"Is Current Week?\" + currentWeekBool + \"\\n\")\n # currentWeekBool = True\n entry['topic'] = \"\"#these are all \"classes\" in the HTML document\n entry['project'] = \"\"\n entry['week'] = content#put the week number into the \"week\" field in the html document\n #entry['isCurrentWeek'] = currentWeekBool\n #currentWeekBool = False\n #if content == weekNum:\n # entry['bool'] = True\n #else:\n # entry['bool'] = True\n \"\"\"if \n if content == currentWeekNum:\n entry['isCurrentWeek'] = True\n else:\n entry['isCurrentWeek'] = False\"\"\"\n\n elif field == 'topic' or field == 'project':#from if len == 2, set the entry for the field to the content in the html doc\n entry[field] = content\n\n else:\n raise ValueError(\"Syntax error in line: {}\".format(line))\n #entryn = entry + \"\\n\"\n\t#cookedn = cooked + \"\\n\"\n\t#fieldn = field + \"\\n\"\n\t#print(\"Entry: \" + entryn)\n #print(\"Cooked: \" + cookedn)\n #print(\"Field: \" + fiieldn)\n if entry:#appends whatever added stuff to the whole docuemnt\n cooked.append(entry)\n\t#returns formatted document after it has been looped throughi\n #number = getWeekNum(raw)\n weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n return [cooked, i+1]\n if today < weeks[0]:\n number = 0\n else:\n number = 10\n return [cooked, number]", "def add_taper(plan, num_days, days_last_week):\r\n taper_vals = [5,3,1]\r\n if num_days >= 2:\r\n if days_last_week >= 2:\r\n plan[-1][-2] = taper_vals[-1]\r\n else:\r\n plan[-2][-1] = taper_vals[-1]\r\n if num_days >= 3:\r\n if days_last_week >= 3:\r\n plan[-1][-3] = taper_vals[-2]\r\n elif days_last_week == 2:\r\n plan[-2][-1] = taper_vals[-2]\r\n else:\r\n plan[-2][-2] = taper_vals[-2]\r\n if num_days >= 4:\r\n if days_last_week >= 4:\r\n plan[-1][-4] = taper_vals[-3]\r\n elif days_last_week == 3:\r\n plan[-2][-1] = taper_vals[-3]\r\n elif days_last_week == 2:\r\n plan[-2][-2] = taper_vals[-3]\r\n else:\r\n plan[-2][-3] = taper_vals[-3]\r\n return plan", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def run_preprocessor():\n\n prepro = PP()\n df = prepro.load_data()\n\n df.subject_ch = df.subject_ch.map(lambda x: convert_emoticons(str(x)))\n\n df = prepro.remove_html_tags()\n\n\n df = remove_unwanted_columns(df)\n df[\"weeks\"] = df['created_ch'].dt.week\n\n # remove characteres\n df = prepro.normalization()\n\n # run tokenizer\n df = prepro.tokenizations()\n\n # remove characteres\n df = prepro.stop_word_remover()\n\n # remove characteres\n df = prepro.stemming_lemmatization()\n\n return df\n\n\n\n\n # def replace_week_numbers(df):\n # \"\"\"\n # functiion 
that change week number from 1 to 19\n # \"\"\"\n \n # return sorted(df.weeks.unique())\n\n # df[\"weeks_num\"] = df[\"weeks\"].map(lambda x: replace_week_numbers(df).index(x)+1 if(x in replace_week_numbers(df)) else np.nan)", "def day_07_a() -> int:\n return 0", "def _create_week_dates_text(self):\n week_start = []\n week_end = []\n week_text = []\n week_start.append(self.start_date)\n week_end.append(self.start_date + timedelta(days=6))\n week_start.append(week_end[0] + timedelta(days=1))\n week_end.append(self.display_end_date)\n for i in (0,1):\n week_start_month = week_start[i].strftime(\"%b\")\n week_start_day = week_start[i].strftime(\"%d\").lstrip(\"0\")\n week_end_month = week_end[i].strftime(\"%b\")\n week_end_day = week_end[i].strftime(\"%d\").lstrip(\"0\")\n week_text.append(\"%s %s - %s %s\" %(week_start_month, \n week_start_day, week_end_month, week_end_day))\n return week_text", "def _normalize_publication_datetime(self, article_publication_datetime):\n datetime_parsed = dateparser.parse(article_publication_datetime)\n week_start = datetime_parsed - datetime.timedelta(\n days=datetime_parsed.weekday()\n )\n week_end = week_start + datetime.timedelta(days=6)\n\n return (\n week_start.strftime('%d-%m-%Y'),\n week_end.strftime('%d-%m-%Y')\n )", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def week_fromordinal(cls, ordinal):\n return int(math.floor(cls.day_fromordinal(ordinal) / 7)) + 1", "def getSubtitleTable(date) -> str:\n return \"\"\"| Start of the day | Weeks until NIMCET |\n| ---------------- | -----------------: |\n| {time} | {weeks} weeks |\"\"\".format(time=formattedTimeNow(), weeks=round((datetime(2021, 5, 21) - date).days/7, 1))", "def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)", "def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def day_06_b() -> int:\n return 0", "def test_weeks(self):\n d = datetime(2014, 1, 29)\n eq_(week_start(d), datetime(2014, 1, 27, 0, 0, 0))\n eq_(week_end(d), datetime(2014, 2, 2, 23, 59, 59))", "def the_week_url():\n return '/timeline/%d/%02d/%d/' % \\\n (datetime.now().year, datetime.now().month, timekit.monthweek(datetime.now()))", "def test_weekly_resolution_hindcast(daily_initialized, daily_obs):\n weekly_hindcast = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_hindcast.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_hindcast, weekly_obs).all()", "def nflweek(self, irc, msg, args, optlist, optweek):\n \n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvYWxsU2NoZWR1bGUuanM=')\n \n usePre, useNext, outputWeek = False, False, False\n for (option, arg) in optlist:\n if option == 'pre':\n usePre = True\n \n if optweek:\n if optweek == \"next\":\n useNext = True\n elif optweek.isdigit():\n if usePre: \n if 1 <= int(optweek) <= 4:\n outputWeek = \"Preseason Week %s\" % 
optweek\n else:\n irc.reply(\"ERROR: Preseason week number must be between 1 and 4.\")\n return\n else:\n if 1 <= int(optweek) <= 17:\n outputWeek = \"Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Week must be between 1-17\")\n return \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n jsondata = json.loads(html)\n\n week = jsondata.get('week', None) # work with the week data so we know where we are.\n\n if week is None:\n irc.reply(\"Failed to load schedule.\")\n return\n\n currentWeekName = week.get('current', {'current': None}).get('weekName', None) \n nextWeekName = week.get('next', {'next': None}).get('weekName', None) \n\n if currentWeekName is None:\n irc.reply(\"Cannot figure out the current week.\")\n return\n\n games = jsondata.get('content', None) # data in games.\n \n if games is None:\n irc.reply(\"Failed to load the games data.\")\n return\n \n if outputWeek:\n games = [item['games'] for item in games if item['weekName'] == outputWeek]\n weekOutput = outputWeek\n elif useNext:\n games = [item['games'] for item in games if item['weekName'] == nextWeekName]\n weekOutput = nextWeekName\n else:\n games = [item['games'] for item in games if item['weekName'] == currentWeekName]\n weekOutput = currentWeekName\n \n append_list = []\n\n for games in games:\n for t in games:\n awayTeam = self._translateTeam('team', 'nid', t['awayTeamId'])\n homeTeam = self._translateTeam('team', 'nid', t['homeTeamId'])\n append_list.append(\"[\" + t['date']['num'] + \"] \" + awayTeam + \"@\" + homeTeam + \" \" + t['date']['time'])\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} :: {1}\".format(ircutils.bold(weekOutput), descstring)\n \n irc.reply(output)", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)" ]
[ "0.6462268", "0.64046675", "0.58653", "0.5658921", "0.5573468", "0.5554211", "0.5514348", "0.5287353", "0.5228355", "0.51670694", "0.5163674", "0.5142728", "0.513467", "0.5127089", "0.51245564", "0.5107337", "0.5054805", "0.5047123", "0.503464", "0.50195706", "0.50117457", "0.50092167", "0.49979863", "0.49966592", "0.49919719", "0.49884138", "0.49822214", "0.49758613", "0.49383804", "0.49240315" ]
0.69929993
0
process_arguments turns date into datetime
def test_date_only(self): args = { '--directory': './', '<date>': '2016-11-15', '<header>': None, '<seq>': None, '<week>': None } expected = { '--directory': Path('./'), '<date>': datetime(2016, 11, 15), '<header>': None, '<seq>': None, '<week>': None } self.assertEqual( star_barcode.process_arguments(args), expected )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_dates(args):\n if 'date' in args:\n if args.get('period') == 'range' and 'end_date' in args:\n args['date'] = '{},{}'.format(args['date'],\n args['end_date'])\n return args", "def parse_date_arg(date_arg):\n return datetime.datetime.strptime(date_arg, DATE_FORMAT)", "def read_arguments(self, options):\n if options['datetime']:\n dt_from = parser.parse(options['datetime'])\n else:\n dt_from = datetime.now() - timedelta(hours=1)\n dt_from = dt_from.replace(minute=0, second=0, microsecond=0)\n\n if options['to_datetime']:\n dt_to = parser.parse(options['to_datetime'])\n else:\n dt_to = dt_from\n dt_to = dt_to.replace(minute=59, second=59, microsecond=999999)\n return dt_from, dt_to", "def _restricted_dates(date):\n _dates = list(date)\n try:\n return_date = datetime.strptime(date, '%Y-%m-%d').date()\n # end_date = datetime.strptime(dates[1], '%Y-%m-%d').date()\n except ValueError:\n raise argparse.ArgumentTypeError(\n f\"Could not parse dates. Did you format them yyyy-mm-dd? Dates received:\\n{date}\")\n\n # if start_date > end_date:\n # raise argparse.ArgumentTypeError(\n # f\"Start date {start_date} may not be later than end date {end_date}\")\n # return [start_date, end_date, 55]\n return return_date", "def __call__(self, value):\n try:\n return to_utc_datetime(value)\n except Exception as e:\n raise argparse.ArgumentTypeError(\n \"Start time and end time filters must be in the ISO 8601 UTC format: YYYY-MM-DDThh:mm:ssZ \"\n f\"(e.g. 1984-09-15T19:20:30Z or 1984-09-15). {e}\"\n )", "def test_parse_args(self):\n args = '%s %s' % (self.date1, self.date1)\n args = modis.parse_args(args.split(' '))\n self.assertEqual(args.start_date, self.date1)\n self.assertEqual(args.end_date, self.date1)", "def read_arguments(self, options):\n if options['datetime']:\n dt_from = parser.parse(options['datetime'])\n else:\n dt_from = datetime.now() - timedelta(hours=2)\n dt_from = dt_from.replace(minute=0, second=0, microsecond=0)\n\n if options['to_datetime']:\n dt_to = parser.parse(options['to_datetime'])\n else:\n dt_to = dt_from\n dt_to = dt_to.replace(minute=0, second=0, microsecond=0)\n\n return dt_from, dt_to", "def date_parser(date_arg):\n\n return datetime.datetime.strptime(date_arg, '%Y-%m-%d')", "def _get_normal_date(self, args):\n\n func1, func2, func3 = args\n self.assertIsNotNone(func1(20130201, \"20190120\"))\n self.assertIsNotNone(func2(\"2013/02/01\", \"2019-01-20\"))\n self.assertIsNotNone(func3(r\"2013-/\\-02~@-\\/-@~01\",\n pd.to_datetime('2019-01-20')))", "def validate_date_args(self):\n\n date_args = self.args[1:3]\n\n if not all([self.validate_date_format(x) for x in date_args]):\n raise InvalidDateFormatError", "def _parse_args(self, *args, **kw):\n\n datefmt = kw.get('datefmt', getDefaultDateFormat())\n d = t = s = None\n ac = len(args)\n microsecs = None\n\n if ac == 10:\n # Internal format called only by DateTime\n yr, mo, dy, hr, mn, sc, tz, t, d, s = args\n elif ac == 11:\n # Internal format that includes milliseconds (from the epoch)\n yr, mo, dy, hr, mn, sc, tz, t, d, s, millisecs = args\n microsecs = millisecs * 1000\n\n elif ac == 12:\n # Internal format that includes microseconds (from the epoch) and a\n # flag indicating whether this was constructed in a timezone naive\n # manner\n yr, mo, dy, hr, mn, sc, tz, t, d, s, microsecs, tznaive = args\n if tznaive is not None: # preserve this information\n self._timezone_naive = tznaive\n\n elif not args or (ac and args[0] is None):\n # Current time, to be displayed in local timezone\n t = time()\n lt = 
safelocaltime(t)\n tz = self.localZone(lt)\n ms = (t - math.floor(t))\n s, d = _calcSD(t)\n yr, mo, dy, hr, mn, sc = lt[:6]\n sc = sc + ms\n self._timezone_naive = False\n\n elif ac == 1:\n arg = args[0]\n\n if arg == '':\n raise SyntaxError(arg)\n\n if isinstance(arg, DateTime):\n \"\"\"Construct a new DateTime instance from a given\n DateTime instance.\n \"\"\"\n t = arg.timeTime()\n s, d = _calcSD(t)\n yr, mo, dy, hr, mn, sc, tz = arg.parts()\n\n elif isinstance(arg, datetime):\n yr, mo, dy, hr, mn, sc, numerictz, tznaive = \\\n self._parse_iso8601_preserving_tznaive(arg.isoformat())\n if arg.tzinfo is None:\n self._timezone_naive = True\n tz = None\n else:\n self._timezone_naive = False\n # if we have a pytz tzinfo, use the `zone` attribute\n # as a key\n tz = getattr(arg.tzinfo, 'zone', numerictz)\n ms = sc - math.floor(sc)\n x = _calcDependentSecond2(yr, mo, dy, hr, mn, sc)\n\n if tz:\n try:\n zone = _TZINFO[tz]\n except DateTimeError:\n try:\n zone = _TZINFO[numerictz]\n except DateTimeError:\n raise DateTimeError(\n 'Unknown time zone in date: %s' % arg)\n tz = zone.tzinfo.zone\n else:\n tz = self._calcTimezoneName(x, ms)\n s, d, t, microsecs = _calcIndependentSecondEtc(tz, x, ms)\n\n elif (isinstance(arg, basestring) and\n arg.lower() in _TZINFO._zidx):\n # Current time, to be displayed in specified timezone\n t, tz = time(), _TZINFO._zmap[arg.lower()]\n ms = (t - math.floor(t))\n # Use integer arithmetic as much as possible.\n s, d = _calcSD(t)\n x = _calcDependentSecond(tz, t)\n yr, mo, dy, hr, mn, sc = _calcYMDHMS(x, ms)\n\n elif isinstance(arg, basestring):\n # Date/time string\n iso8601 = iso8601Match(arg.strip())\n fields_iso8601 = iso8601 and iso8601.groupdict() or {}\n if fields_iso8601 and not fields_iso8601.get('garbage'):\n yr, mo, dy, hr, mn, sc, tz, tznaive = \\\n self._parse_iso8601_preserving_tznaive(arg)\n self._timezone_naive = tznaive\n else:\n yr, mo, dy, hr, mn, sc, tz = self._parse(arg, datefmt)\n\n if not self._validDate(yr, mo, dy):\n raise DateError('Invalid date: %s' % arg)\n if not self._validTime(hr, mn, int(sc)):\n raise TimeError('Invalid time: %s' % arg)\n ms = sc - math.floor(sc)\n x = _calcDependentSecond2(yr, mo, dy, hr, mn, sc)\n\n if tz:\n try:\n tz = _TZINFO._zmap[tz.lower()]\n except KeyError:\n if numericTimeZoneMatch(tz) is None:\n raise DateTimeError(\n 'Unknown time zone in date: %s' % arg)\n else:\n tz = self._calcTimezoneName(x, ms)\n s, d, t, microsecs = _calcIndependentSecondEtc(tz, x, ms)\n\n else:\n # Seconds from epoch, gmt\n t = arg\n lt = safelocaltime(t)\n tz = self.localZone(lt)\n ms = (t - math.floor(t))\n s, d = _calcSD(t)\n yr, mo, dy, hr, mn, sc = lt[:6]\n sc = sc + ms\n\n elif ac == 2:\n if isinstance(args[1], basestring):\n # Seconds from epoch (gmt) and timezone\n t, tz = args\n ms = (t - math.floor(t))\n try:\n tz = _TZINFO._zmap[tz.lower()]\n except KeyError:\n if numericTimeZoneMatch(tz) is None:\n raise DateTimeError('Unknown time zone: %s' % tz)\n # Use integer arithmetic as much as possible.\n s, d = _calcSD(t)\n x = _calcDependentSecond(tz, t)\n yr, mo, dy, hr, mn, sc = _calcYMDHMS(x, ms)\n else:\n # Year, julian expressed in local zone\n t = time()\n lt = safelocaltime(t)\n tz = self.localZone(lt)\n yr, jul = args\n yr = _correctYear(yr)\n d = (_julianday(yr, 1, 0) - jd1901) + jul\n x_float = d * 86400.0\n x_floor = math.floor(x_float)\n ms = x_float - x_floor\n x = long(x_floor)\n yr, mo, dy, hr, mn, sc = _calcYMDHMS(x, ms)\n s, d, t, microsecs = _calcIndependentSecondEtc(tz, x, ms)\n else:\n # Explicit format\n 
yr, mo, dy = args[:3]\n hr, mn, sc, tz = 0, 0, 0, 0\n yr = _correctYear(yr)\n if not self._validDate(yr, mo, dy):\n raise DateError('Invalid date: {}'.format(args))\n args = args[3:]\n if args:\n hr, args = args[0], args[1:]\n if args:\n mn, args = args[0], args[1:]\n if args:\n sc, args = args[0], args[1:]\n if args:\n tz, args = args[0], args[1:]\n if args:\n raise DateTimeError('Too many arguments')\n if not self._validTime(hr, mn, sc):\n raise TimeError('Invalid time: %s' % repr(args))\n\n x = _calcDependentSecond2(yr, mo, dy, hr, mn, sc)\n ms = sc - math.floor(sc)\n if tz:\n try:\n tz = _TZINFO._zmap[tz.lower()]\n except KeyError:\n if numericTimeZoneMatch(tz) is None:\n raise DateTimeError('Unknown time zone: %s' % tz)\n else:\n # Get local time zone name\n tz = self._calcTimezoneName(x, ms)\n s, d, t, microsecs = _calcIndependentSecondEtc(tz, x, ms)\n\n self._dayoffset = int((_julianday(yr, mo, dy) + 2) % 7)\n # Round to nearest microsecond in platform-independent way. You\n # cannot rely on C sprintf (Python '%') formatting to round\n # consistently; doing it ourselves ensures that all but truly\n # horrid C sprintf implementations will yield the same result\n # cross-platform, provided the format asks for exactly 6 digits after\n # the decimal point.\n sc = round(sc, 6)\n if sc >= 60.0: # can happen if, e.g., orig sc was 59.9999999\n sc = 59.999999\n self._nearsec = math.floor(sc)\n self._year, self._month, self._day = yr, mo, dy\n self._hour, self._minute, self._second = hr, mn, sc\n self.time, self._d, self._tz = s, d, tz\n # self._micros is the time since the epoch\n # in long integer microseconds.\n if microsecs is None:\n microsecs = long(round(t * 1000000.0))\n self._micros = microsecs", "def valid_date(input_date):\n try:\n input_dt = dt.datetime.strptime(input_date, \"%Y-%m-%d\")\n return input_date\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(input_date)\n raise argparse.ArgumentTypeError(msg)", "def test_toDateUsesArgumentsToMakeADate(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n date = (2014, 10, 22)\n seconds = time.mktime(date + (0,) * 6)\n\n logDate = log.toDate(seconds)\n self.assertEqual(date, logDate)", "def MakeDate(*args):\n raise NotImplementedError(\"MakeDate has not been written yet. Passsed: %s\" % (args, ))", "def test_convert_datetime():", "def valid_args(args):\n is_valid = True\n\n # valid date format?\n try:\n datetime.datetime(year=args.year, month=args.month, day=args.day)\n except Exception:\n traceback.print_exc()\n is_valid = False\n\n print(f\"Arguments: {args}\")\n return is_valid", "def main():\n\t\n\tusageString = \"\"\" This is a commandline interface for converting a \n\tdate as text into a datetime object. 
\"\"\"\n\n\tif( len(sys.argv) == 1):\n\t \tprint usageString\n\telse:\t \t\n\t\targString = ' '.join(sys.argv[1:])\n\t\t\n\t\te = getBestDateFromText(argString);\n\t\t\n\t\tif (e is None):\n\t\t\tprint \"no date match for \" + argString\n\t\telse:\n\t\t\tprint \"date computed is: \" + str(e)", "def date(*args, date: bool=True, format: AnyStr=\"\", shortDate: bool=True, shortTime: bool=True,\n time: bool=True, **kwargs)->AnyStr:\n pass", "def _parse_args(input_date, input_meal):\n parser = ArgumentParser()\n parser.add_argument('-d', '--date', type=str)\n parser.add_argument('-m', '--meal', type=str)\n args = parser.parse_args()\n # Allows getting the args from either CLI or as the function parameters\n query_date = args.date or input_date\n query_meal = args.meal or input_meal\n # Validate and sanitize the meal\n if query_meal and query_meal not in constants.MEAL_CHOICES:\n raise ValueError(\"Refeições suportadas são apenas 'almoço', 'jantar' e 'todas'.\")\n # Validate and sanitize the date\n if query_date == constants.DATE_TOMORROW:\n query_date = date.today() + timedelta(days=1)\n else:\n try:\n query_date = parse_date(args.date if args.date else input_date or None)\n except ValueError:\n query_date = None\n return query_date, query_meal", "def date_type(arg):\n year_formats = (\n '%Y-%m-%d',\n '%Y%m%d',\n '%d',\n '%j',\n )\n\n for yf in year_formats:\n try:\n return date(*strptime(arg, yf)[0:3])\n except ValueError:\n pass\n\n raise ArgumentTypeError(\n 'Unable to coerce {} to a date. Try %Y-%m-%d'.format(arg)\n )", "def dateType(string):\n try:\n date = datetime.datetime.strptime(string, '%Y-%m-%d').date()\n except ValueError:\n msg = \"%r is not a valid date\" % string\n raise argparse.ArgumentTypeError(msg)\n return date", "def _preprocess(self, args):\n def preprocess_dates(args):\n \"\"\"Combine date and end_date into a range.\"\"\"\n if 'date' in args:\n if args.get('period') == 'range' and 'end_date' in args:\n args['date'] = '{},{}'.format(args['date'],\n args['end_date'])\n return args\n\n def preprocess_bools(args):\n \"\"\"Convert all booleans to integers.\"\"\"\n for arg in args:\n if type(args[arg]) == bool:\n args[arg] = int(args[arg])\n return args\n for name, value in locals().items():\n if name.startswith('preprocess_') and callable(value):\n args = value(args)\n return args", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def test_check_args_submit_time(self):\n test_time = \"2021/06/18 11:00:00\"\n with self.assertRaises(TypeError) as context:\n self.duedate.check_args(test_time, self.test_turn_time)\n self.assertTrue(\"Invalid input format. 'submit_time' must be <datetime> format.\" in str(\n context.exception))", "def valid_date_type(arg_date_str):\n try:\n return dt.datetime.strptime(arg_date_str, \"%Y-%m-%d\")\n except ValueError:\n msg = \"Given Date ({0}) not valid! 
Expected format, YYYY-MM-DD!\".format(arg_date_str)\n raise argparse.ArgumentTypeError(msg)", "def arg_parse():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', '--start_date', nargs='?', default=SETTINGS.MIN_START_DATE, \n type=str, help=f'Start date string in format YYYYMMDD, between '\n f'{SETTINGS.MIN_START_DATE} and {SETTINGS.MAX_END_DATE}', metavar='')\n parser.add_argument('-e', '--end_date', nargs='?', default=SETTINGS.MAX_END_DATE,\n type=str, help=f'End date string in format YYYYMMDD, between '\n f'{SETTINGS.MIN_START_DATE} and {SETTINGS.MAX_END_DATE}', metavar='')\n \n return parser.parse_args()", "def post_date(context, *args, **kwargs):\n obj = context['object']\n date_format = kwargs.get('format', \"l j, F\")\n return date(obj.publication_date, date_format)", "def main():\n date_time_conversion('2018-12-30T09:37:56.000001Z', '2020-07-12T07:56:43.000001Z', 0, 0, 0, 0)", "def strip_date(self, arg, line_number=0):\n try:\n dt = datetime.strptime(arg, \"%d %b %Y\")\n return dt\n except ValueError:\n raise ValueError(f\"US42 - Illegitimate date of {arg}. GEDCOM line: {line_number}\")\n else:\n return 'NA'", "def convert_date(adate):\n\tprint \"date given: \" + adate\n\t# stuff\n\tprint \"epoch time for date: \"" ]
[ "0.6994293", "0.64886105", "0.6331184", "0.62835664", "0.62754744", "0.6230057", "0.62074155", "0.6174838", "0.61325836", "0.61302996", "0.6023809", "0.6020006", "0.600959", "0.6000738", "0.5934657", "0.58660245", "0.58435833", "0.5829836", "0.58003515", "0.5754549", "0.5729049", "0.5723078", "0.5721359", "0.56926095", "0.5674842", "0.5638503", "0.5635553", "0.56314725", "0.5627081", "0.5615206" ]
0.67965895
1
Create a Connectable Observable. A multicasted Observable (rx_publish) uses a Subject under the hood to make multiple Observers see the same Observable execution.
def rx_publish( an_observable: Observable, subject_handler: Optional[SubjectHandler] = None, connection_handler: Optional[ConnectableObservableHandler] = None, subject_factory: SubjectFactory = rx_subject, ) -> ConnectableObservable: _ref_count_activated = False # Flag to enable auto-connect _ref_count = 0 # subscription count (used for auto-connect) _subscription: Optional[Subscription] = None # observale subscription _connectable_observable: Optional[ConnectableObservable] = None # for ref_count return value _subject: Optional[Subject] = None async def _unsubscribe() -> None: nonlocal _subscription if _subscription: await _subscription() # notify if connection_handler: await connection_handler.on_disconnect() _subscription = None async def _connect() -> Subscription: """Connection Handler implementation.""" nonlocal _subscription, _subject if _subscription: return _unsubscribe if not _subject: # pragma: no cover # never reached raise RuntimeError("unexpected error") _subscription = await an_observable.subscribe(an_observer=_subject) if connection_handler: await connection_handler.on_connect() return _unsubscribe async def _on_subscribe(count: int, source: Observer) -> None: nonlocal _subscription, _ref_count_activated, _ref_count _ref_count += 1 # forward event if subject_handler: await subject_handler.on_subscribe(count=count, source=source) # auto connect if _ref_count_activated and _subscription is None and _ref_count == 1: await _connect() async def _on_unsubscribe(count: int, source: Observer) -> None: nonlocal _subscription, _ref_count_activated, _ref_count _ref_count -= 1 # forward event if subject_handler: await subject_handler.on_unsubscribe(count=count, source=source) # auto disconnect if _ref_count_activated and _subscription and _ref_count == 0: await _unsubscribe() # our multicast subject used under the hood _subject = subject_factory(subject_handler=_subject_handler(on_subscribe=_on_subscribe, on_unsubscribe=_on_unsubscribe)) async def _ref_count_handler() -> Observable: """Autostart the multicasted observable. ref_count makes the multicasted Observable automatically start executing when the first subscriber arrives, and stop executing when the last subscriber leaves. """ nonlocal _ref_count_activated, _connectable_observable if not _connectable_observable: # pragma: no cover # never reached raise RuntimeError("unexpected error") _ref_count_activated = True return rx_create(subscribe=_connectable_observable.subscribe) # our connectable observable _connectable_observable = connectable_observable(connect=_connect, ref_count=_ref_count_handler, subscribe=_subject.subscribe) return _connectable_observable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auto_connect(self, subscriber_count: int = 1) -> Observable[_T]:\n\n connectable_subscription: List[Optional[abc.DisposableBase]] = [None]\n count = [0]\n source = self\n is_connected = [False]\n\n if subscriber_count == 0:\n connectable_subscription[0] = source.connect()\n is_connected[0] = True\n\n def subscribe(\n observer: abc.ObserverBase[_T],\n scheduler: Optional[abc.SchedulerBase] = None,\n ) -> abc.DisposableBase:\n count[0] += 1\n should_connect = count[0] == subscriber_count and not is_connected[0]\n subscription = source.subscribe(observer)\n if should_connect:\n connectable_subscription[0] = source.connect(scheduler)\n is_connected[0] = True\n\n def dispose() -> None:\n subscription.dispose()\n count[0] -= 1\n is_connected[0] = False\n\n return Disposable(dispose)\n\n return Observable(subscribe)", "def __init__(self, subscribe):\n def _subscribe(observer):\n def fix_subscriber(subscriber):\n \"\"\"Fix subscriber to check for None or function returned to \n decorate as Disposable\"\"\"\n \n if subscriber is None:\n subscriber = Disposable.empty()\n elif type(subscriber) == types.FunctionType:\n subscriber = Disposable(subscriber)\n\n return subscriber\n\n def set_disposable(scheduler=None, value=None):\n try:\n auto_detach_observer.disposable = fix_subscriber(subscribe(auto_detach_observer))\n except Exception as ex:\n if not auto_detach_observer.fail(ex):\n raise ex\n\n auto_detach_observer = AutoDetachObserver(observer)\n\n if current_thread_scheduler.schedule_required():\n current_thread_scheduler.schedule(set_disposable)\n else:\n set_disposable()\n\n return auto_detach_observer\n \n super(AnonymousObservable, self).__init__(_subscribe)", "def merge_observable(self):\n sources = self\n\n def subscribe(observer):\n m = SingleAssignmentDisposable()\n group = CompositeDisposable()\n is_stopped = False\n group.add(m)\n \n def on_next(inner_source):\n inner_subscription = SingleAssignmentDisposable()\n group.add(inner_subscription)\n\n def on_complete():\n nonlocal group\n \n group.remove(inner_subscription)\n if is_stopped and group.length == 1:\n observer.on_completed()\n \n disposable = inner_source.subscribe(\n observer.on_next,\n observer.on_error, \n on_complete)\n \n inner_subscription.disposable = disposable\n \n def on_complete():\n nonlocal is_stopped\n\n is_stopped = True\n if group.length == 1:\n observer.on_completed()\n \n m.disposable = sources.subscribe(on_next, observer.on_error, on_complete)\n return group\n \n return AnonymousObservable(subscribe)", "def to_rx(source: FlowableMixin, batched: bool = None, subscribe_schduler: Scheduler = None):\n\n class FromFlowableObservable(Observable):\n def _subscribe_core(self, observer: typing.Observer, scheduler: typing.Scheduler = None):\n class RxBPScheduler(SchedulerBase):\n def __init__(self, underlying):\n super().__init__()\n\n self.underlying = underlying\n\n def sleep(self, seconds: float) -> None:\n pass\n\n @property\n def now(self) -> datetime:\n return self.underlying.now\n\n @property\n def is_order_guaranteed(self) -> bool:\n # unknown property, therefore select pessimistically\n return False\n\n def schedule(self, action: ScheduledAction, state: TState = None) -> Disposable:\n return self.underlying.schedule(action=action, state=state)\n\n def schedule_relative(self, duetime: RelativeTime, action: ScheduledAction,\n state: TState = None) -> Disposable:\n return self.underlying.schedule_relative(duetime=duetime, action=action, state=state)\n\n def schedule_absolute(self, duetime: AbsoluteTime, 
action: ScheduledAction,\n state: TState = None) -> Disposable:\n return self.underlying.schedule_absolute(duetime=duetime, action=action, state=state)\n\n def schedule_periodic(self, period: RelativeTime, action: ScheduledPeriodicAction,\n state: Optional[TState] = None) -> Disposable:\n raise NotImplementedError\n\n class ToRxObserver(Observer):\n @property\n def is_volatile(self):\n return False\n\n def on_next(self, elem: ElementType):\n for e in elem:\n observer.on_next(e)\n return continue_ack\n\n def on_error(self, err):\n observer.on_error(err)\n\n def on_completed(self):\n observer.on_completed()\n\n to_rx_observer = ToRxObserver()\n\n if batched is True:\n def on_next(v):\n batch = list(v())\n observer.on_next(batch)\n return continue_ack\n\n to_rx_observer.on_next = on_next\n\n trampoline_scheduler = subscribe_schduler or TrampolineScheduler()\n scheduler_ = RxBPScheduler(underlying=scheduler) if scheduler is not None else trampoline_scheduler\n subscriber = init_subscriber(scheduler=scheduler_, subscribe_scheduler=trampoline_scheduler)\n # observer_info = init_observer_info(observer=to_rx_observer)\n return source.subscribe(\n observer=to_rx_observer,\n subscribe_scheduler=subscriber.subscribe_scheduler,\n scheduler=subscriber.subscribe_scheduler,\n )\n\n return FromFlowableObservable()", "def subject(\n subscribe: Subscribe, on_next: NextHandler, on_error: ErrorHandler = default_error, on_completed: CompleteHandler = default_on_completed\n) -> Subject:\n return SubjectDefinition(subscribe=subscribe, on_next=on_next, on_error=on_error, on_completed=on_completed)", "def CreateSubscribeTransaction(self, dest, once=False):\n c = Subscribe(dest, self.node_id, once)\n self.connections.append((\"REACTIVE\", c))\n return c", "def subscribe(self, subject):\n pass", "def rx_amb(*observables: Observable) -> Observable:\n\n if len(observables) < 1:\n raise RuntimeError(\"#observables must be greather than 1\")\n\n async def _subscribe(an_observer: Observer) -> Subscription:\n\n _subject = rx_subject()\n\n # we send the first\n _first_subscription: Subscription = await rx_first(observable=_subject).subscribe(an_observer)\n\n # subscribe to all observables in parallele\n _subscriptions: List[Subscription] = []\n _tasks = []\n async with curio.TaskGroup(wait=all) as g:\n for an_observable in observables:\n _tasks.append(await g.spawn(_build_observer_and_subscribe, an_observable, _subject))\n _subscriptions = [t.result for t in _tasks]\n\n async def _subscription_handler():\n nonlocal _first_subscription, _subscriptions\n if _first_subscription:\n await _first_subscription()\n for _unsub in _subscriptions:\n if _unsub:\n await _unsub()\n\n return _subscription_handler\n\n return rx_create(subscribe=_subscribe, max_observer=1)", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def __init__(self, data_source, extra_protocols=[]):\n\t\tObservable.__init__(self)\n\t\tThread.__init__(self)\n\t\tself.protocols = extra_protocols\n\t\tself.data_source = data_source\n\t\tself.id = -1\n\t\tself.name = type(self).__name__\n\t\tself.running = True", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def connect(self):\n 
self.socket.connect(f'tcp://{self.ip}:{self.port}')\n self.socket.send_string('PUB_PORT')\n self.pub_port = self.socket.recv_string()\n self.pub_socket = zmq.Socket(self.ctx, zmq.PUB)\n self.pub_socket.connect(f\"tcp://{self.ip}:{self.pub_port}\")", "def mqttConnect(self):\n clientId = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(self.projectId, \n self.cloudRegion, \n self.registryId, \n self.deviceId)\n mqttc = mqtt.Client(client_id = clientId)\n \n # With Google Cloud IoT Core, the username field is ignored, and the\n # password field is used to transmit a JWT to authorize the device.\n mqttc.username_pw_set(\n username='unused',\n password=self.create_jwt())\n\n # Enable SSL/TLS support.\n mqttc.tls_set(ca_certs=self.caCert, tls_version=ssl.PROTOCOL_TLSv1_2) \n self.blogger.info('Starting connection to: {0}:{1}'.format(self.mqttHost, self.mqttPort))\n mqttc.on_connect = self.connectCallBack\n mqttc.on_message = self.processMessage\n mqttc.on_publish = self.publishedMessageCallBack\n mqttc.connect(self.mqttHost, port=self.mqttPort, keepalive=60)\n try:\n mqttc.subscribe(self.configTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to config topic: {}'.format(self.configTopic))\n mqttc.subscribe(self.commandTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to command topic: {}'.format(self.commandTopic))\n mqttc.subscribe(self.eventTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to event topic: {}'.format(self.eventTopic))\n self.messageToPublish = '{\"thingy\":\"ready\"}'\n# self.publishMessage(self.eventTopic, QoS)\n except Exception as e:\n self.blogger.error('subscription failed for reason: {0}'.format(e))\n\n return mqttc", "def subscribe(observer):", "def subscribe(observer):", "def dematerialize(self) -> ObservableBase:\n\n source = self\n\n def subscribe(observer, scheduler=None):\n def on_next(value):\n return value.accept(observer)\n\n return source.subscribe_(on_next, observer.on_error, observer.on_completed, scheduler)\n return AnonymousObservable(subscribe)", "def create_controllable_sink(sink_id: str,\n operator: Callable[[rx.Observable], rx.Observable],\n sink: rx.Observable,\n ) -> rx.Observable:\n q = queue.Queue()\n index = 0\n\n lifecycle = QObservable(q).pipe(\n ops.subscribe_on(NewThreadScheduler()),\n operator,\n ops.ignore_elements(),\n ops.observe_on(AsyncIOThreadSafeScheduler(asyncio.get_event_loop())),\n )\n\n def push_item(q, sink_id):\n def _push_item(i):\n nonlocal index\n q.put_nowait(i)\n if isinstance(i, OnNext):\n index += 1\n if index == 500:\n index = 0\n return (sink_id, q.qsize())\n return None\n return ops.map(_push_item)\n\n feedback = sink.pipe(\n ops.materialize(),\n push_item(q, sink_id),\n ops.filter(lambda i: i is not None)\n )\n\n return rx.merge(lifecycle, feedback)", "def merge_all(self):\n sources = self\n\n def subscribe(observer):\n group = CompositeDisposable()\n is_stopped = False\n m = SingleAssignmentDisposable()\n group.add(m)\n \n def on_next(inner_source):\n inner_subscription = SingleAssignmentDisposable()\n group.add(inner_subscription)\n\n def on_next(x):\n observer.on_next(x)\n \n def on_completed():\n group.remove(inner_subscription)\n if is_stopped and group.length == 1:\n observer.on_completed()\n \n inner_subscription.disposable = inner_source.subscribe(on_next, observer.on_error, on_completed)\n \n def on_completed():\n is_stopped = True\n if len(group) == 1:\n observer.on_completed()\n\n m.disposable = sources.subscribe(on_next, observer.on_error, on_completed)\n return group\n\n return 
AnonymousObservable(subscribe)", "def of(*args) -> ObservableBase:\n from ..operators.observable.of import of\n return of(*args)", "def __get_zmq_pub(self):\n print(\"Publishing to tcp://127.0.0.1:%d channel: tweets\" % self.port)\n context = zmq.Context()\n socket = context.socket(zmq.PUB)\n socket.bind(\"tcp://127.0.0.1:%d\" % self.port)\n return socket", "def test_connect_subscriber():\n config = {\"listeners\": \"localhost:8080\"}\n registry = Registry()\n registry.new(name=\"test\", backend=\"dummy\", **config)\n\n dummy = registry[\"test\"]\n subscriber = dummy.subscribe([\"mytopic\"])\n message = subscriber.listen()\n\n assert message == \"Dummy Message\"\n subscriber._connect.assert_called_once()", "def newEventsObservable(self, modelSetKey: str) -> Subject:", "def driver(sink):\n def on_subscribe(observer, scheduler):\n def on_next(i):\n if type(i) is Create:\n observer.on_next(\n Feedback(\n i.id,\n create_controllable_sink(\n i.id,\n i.operator,\n i.observable,\n )\n )\n )\n\n else:\n observer.on_error(\"app sink unknown command: {}\".format(i))\n\n disposable = sink.connector.subscribe(\n on_next=on_next,\n on_error=observer.on_error,\n on_completed=observer.on_completed)\n\n return disposable\n\n return Source(\n feedback=rx.create(on_subscribe),\n )", "def _CreatePubsubClient():\n client = pubsub_client.PubSubClient()\n client.CreateTopic(DEVICE_NOTE_PUBSUB_TOPIC)\n client.CreateTopic(HOST_NOTE_PUBSUB_TOPIC)\n return client", "def CrossConnect(object):\n source = None # The source interface\n destination = None # The destination interface. For multicast, use multiple CrossConnect instances.\n sourceLabels = None # a Labelset with the allowed labels for the source interface, for this particular cross connect\n # (must be a subset of the allowed labelset for the source interfaces).\n destinationLabel = None # a single label for the source interface\n # A None value means that the interfaces has the \"None\" label.\n sourceLabel = None # a Labelset with the allowed labels for the source interface, for this particular cross connect\n # (must be a subset of the allowed labelset for the source interfaces)\n destinationLabel = None # a single label for the destination interface\n pass", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "async def multicast_client(url):\n host, port = pytak.parse_cot_url(url)\n stream = await pytak.asyncio_dgram.bind((host, port))\n sock = stream.socket\n # group = socket.inet_aton(host)\n # mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n # sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n return stream" ]
[ "0.6327228", "0.5750856", "0.56302005", "0.5515695", "0.5454845", "0.5408904", "0.52990055", "0.5291215", "0.51549757", "0.5026792", "0.49772325", "0.4947613", "0.49465865", "0.4932727", "0.4932727", "0.48783442", "0.48331305", "0.47861823", "0.47821546", "0.47797227", "0.47577372", "0.47464898", "0.47274828", "0.46195573", "0.46086413", "0.45885292", "0.45885292", "0.45885292", "0.45885056", "0.4588016" ]
0.7431155
0
Convert a hex string to a real application tag.
def real_tag(x): if _debug: real_tag._debug("real_tag %r", x) b = xtob(x) tag = Tag(Tag.applicationTagClass, Tag.realAppTag, len(b), b) if _debug: real_tag._debug(" - tag: %r", tag) return tag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def a2b_hex(string):\n\n if len(string) % 2 == 1:\n string = '0' + string\n\n try:\n return binascii.a2b_hex(string.encode('ascii'))\n except TypeError:\n raise Error('Invalid hexadecimal string')", "def hex2bin(hexstr: str, padding: int) -> str:\n if not hexstr.lower().startswith(\"0x\"):\n raise ValueError(\"Input hexadecimal string must have '0x' as the prefix.\")\n return \"0b\" + str(bin(int(hexstr.lower(), 0))).replace(\"0b\", \"\").zfill(padding)", "def hex(string):\n return string.encode('hex')", "def __hex2int(_hex_str):\n return int(\"0x\"+_hex_str, 16)", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def from_hex(x):\n return base64.b16decode(x, True)", "def from_hex_str(value):\n \n return SHex(value)", "def hex2int(r: str) -> int:", "def _convert_hex(self, hex_value):\n if not isinstance(hex_value, str):\n raise TypeError(\"given hex value must be str\")\n m = HEX_RE.match(hex_value)\n if m is None:\n raise ValueError(\"given string does not seem to be Python hex\")\n sign_char, base, exp_sign, exp = [m.group(i) for i in range(1,5)]\n new_sign = \"+\" if sign_char is None else sign_char\n # Line below converts exp to hex value. The \"0x\" prefix is removed \n # with [2:]. The exponent is padded with (too many) zeros (Stata \n # requires 3 digits), and reduced to last 3 digits with [-3:].\n new_exp = (\"000\" + hex(int(exp))[2:])[-3:]\n return \"\".join((new_sign, base, 'X', exp_sign, new_exp))", "def getApplicationProcessId(binaryString, startPos=0):\n if (len(binaryString) - startPos) < PRIMARY_HEADER_BYTE_SIZE:\n raise Error(\"packet header is too small\")\n return (((binaryString[startPos + 0] * 256) + binaryString[startPos + 1]) & 0x07FF)", "def hexstring(self):\n if self.current != b\"<\":\n self.on_parser_error(\"Hexadecimal string expected\")\n self.next()\n token = b''\n self.maybe_spaces_or_comments()\n while self.is_hex_digit:\n token += self.next()\n self.maybe_spaces_or_comments()\n\n ch = self.next()\n if ch != b'>':\n self.on_parser_error(\"Wrong hexadecimal string\")\n if len(token) % 2:\n # if there is an odd number of digits - the last one should be assumed 0\n token += b'0'\n return HexString(token.decode(DEFAULT_ENCODING).upper())", "def from_hex(hexstr, width, height):\n return Glyph.from_bytes(binascii.unhexlify(hexstr.encode('ascii')), width, height)", "def from_hexstr(hexstr):\n b = Utils.ba(hexstr)\n return RAPDU.from_bytes(b)", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def genHexStr(instr: str) -> str:\n\n return hashlib.md5(instr.encode(\"utf-8\")).hexdigest()", "def _from_bytes(value, dummy, _int=int, _hexlify=_hexlify):\n return _int(_hexlify(value), 16)", "def bin2aid(binaid):\n aid = ''\n for i, x in enumerate(binaid):\n aid += '%02X' % x\n if i % 2:\n aid += ':'\n return aid.rstrip(':')", "def hexify(buffer):\n return ''.join('%02x' % ord(c) for c in buffer)", "def get_input_string_into_program(hex_string_input):\n # In python 3, you have byte strings and unicode stings\n # Always make a unicode sandwich, bytes(input)->unicode(inside the program)->bytes(output)\n # If this confuses you, watch this excellent talk by Ned Batchelder\n # https://www.youtube.com/watch?v=sgHbC6udIqc\n byte_string = bytes.fromhex(hex_string_input)\n # The input file in this case is hex representation of a latin-1 encoded file, which is not cool as they should be either calling it out explicitly or just sticking to unicode\n utf_string = byte_string.decode('latin-1')\n 
return utf_string", "def hex_to_int(hex_string):\r\n return int(hex_string, 16)", "def parse_sysex_string(s):\n return binascii.unhexlify(s.replace(' ', ''))", "def ba(hexstr_or_int):\n try:\n t1 = hexstr_or_int.lower()\n t2 = \"\".join([c if c.isalnum() else \" \" for c in t1])\n t3 = t2.split(\" \")\n out = bytearray()\n for bstr in t3:\n if bstr[0:2] == \"0x\":\n bstr = bstr[2:]\n if bstr != \"\":\n l = len(bstr)\n if(l % 2):\n bstr = \"0\"+bstr\n l+=1\n out += bytearray.fromhex(bstr)\n\n except:\n #seems arg is not a string, assume it is a int\n try:\n out = Utils.int_to_ba(hexstr_or_int)\n except:\n # seems arg is not an int, assume it is a list\n try:\n out = bytearray(hexstr_or_int)\n except:\n raise ValueError()\n return out", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def from_hexstr(hexstr):\n b = Utils.ba(hexstr)\n return CAPDU.from_bytes(b)", "def ipa2hash(ipa):\n return clean(ipa.translate(CHAR_TO_CODE))", "def de_hex(msg):\n try:\n return bytes.fromhex(msg).decode('utf-8')\n except (UnicodeDecodeError, ValueError):\n print('Invalid hexadecimal-encoded string')", "def bitstr_to_hex(a):\n return hex(bitstr_to_int(a))", "def replace_gaiji_hex_sub_helper(self, match):\n return match.group(1).decode('hex')", "def check_and_convert_add(self, hex: str):\n if len(hex) != 2:\n raise Exception(\"Address wrong length.\")\n return int(hex, 16)", "def try_tag_to_string(tag_data):\n if not isinstance(tag_data, array.array):\n return tag_data\n\n if tag_data.typecode == 'H':\n try:\n tag_data = str(tag_data.tostring().decode('utf-16'))\n except UnicodeDecodeError:\n pass\n except UnicodeEncodeError:\n pass\n except:\n raise\n\n return tag_data" ]
[ "0.52152145", "0.5183621", "0.5144421", "0.51350594", "0.5121004", "0.5116178", "0.51159996", "0.5048852", "0.504039", "0.5035954", "0.502589", "0.49917215", "0.49784186", "0.49771157", "0.49458298", "0.4939181", "0.4935022", "0.49316686", "0.49273714", "0.49200398", "0.48948154", "0.488612", "0.48754215", "0.4874494", "0.483955", "0.4835161", "0.48219794", "0.48170683", "0.48155323", "0.48132417" ]
0.62485677
0
Encode a Real object into a tag.
def real_encode(obj): if _debug: real_encode._debug("real_encode %r", obj) tag = Tag() obj.encode(tag) if _debug: real_encode._debug(" - tag: %r, %r", tag, tag.tagData) return tag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, obj):\n # type: (List[List[Any]]) -> str\n raise NotImplementedError()", "def encode(self, o):\n # Our encoding prepends an 'x:' prefix.\n return b'x:%s' % str(o.name).encode('utf-8')", "def encode(self, o):\n # Our encoding prepends an 'x:' prefix.\n return b'x:%s' % str(o.name).encode('utf-8')", "def encode_raw(objs):\n return RawWire().encode(objs)", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def encode(self,b):\n raise NotImplementedError('subclasses must override encode()!')", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def serialize(self, obj):\n pass", "def encode(self, value):\r\n pass", "def encode(self, value):\n raise NotImplementedError()", "def encode_any(value: object) -> bytes:\n raise NotImplementedError()", "def encode(self, decoded):", "def JsonComplexEncoder(obj):\n if isinstance(obj, bytes):\n return str(obj)\n else:\n return obj", "def encode(self, desc):\n raise NotImplementedError", "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)", "def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s", "def encode(out, item, *, serialization=None, subtypes=tuple()):\n raise NotImplementedError", "def serialize(self, obj):\n return obj", "def encode(self):\n \n assert False, \"Not implemented.\"", "def encode_result(value: object) -> bytes:\n raise NotImplementedError()", "def _serialize(\n self, value: typing.Any, attr: str | None, obj: typing.Any, **kwargs\n ):\n return value", "def encode(self,obj):\n import yaml\n return bytes(yaml.dump(obj),'utf-8')", "def encode(self): # pragma: no cover\n pass", "def serialize(self, obj):\n return json.dumps(obj)", "def encode(self, value):\r\n return value", "def encode(self) -> bytes:\n return pack('<B' + self.fmt, *([self.type] + self.__args))", "def encode(self):\n return (struct.pack(b\"<iii\", self.size, self.id, self.type) +\n self.body.encode(\"ascii\") + b\"\\x00\\x00\")", "def serialize(obj):\n result = base64.urlsafe_b64encode(obj)\n # this workaround is needed because in case of python 3 the\n # urlsafe_b64encode method returns string of 'bytes' class.\n result = result.decode()\n return result", "def real_decode(tag):\n if _debug: real_decode._debug(\"real_decode %r\", tag)\n\n obj = Real(tag)\n if _debug: real_decode._debug(\" - obj: %r, %r\", obj, obj.value)\n\n return obj" ]
[ "0.66648567", "0.6392294", "0.6392294", "0.63117164", "0.6230552", "0.6224176", "0.6219446", "0.6212044", "0.6202672", "0.61778486", "0.6174891", "0.6076576", "0.6026148", "0.600362", "0.59921145", "0.5973116", "0.5968331", "0.59151846", "0.59141284", "0.590889", "0.58611935", "0.58591354", "0.5850474", "0.5776419", "0.5771185", "0.57702076", "0.5759232", "0.5746977", "0.57055485", "0.56725127" ]
0.8445998
0
Decode a real application tag into a real.
def real_decode(tag): if _debug: real_decode._debug("real_decode %r", tag) obj = Real(tag) if _debug: real_decode._debug(" - obj: %r, %r", obj, obj.value) return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def real_tag(x):\n if _debug: real_tag._debug(\"real_tag %r\", x)\n\n b = xtob(x)\n tag = Tag(Tag.applicationTagClass, Tag.realAppTag, len(b), b)\n if _debug: real_tag._debug(\" - tag: %r\", tag)\n\n return tag", "def decode(self, encoded):", "def decode(data):\n raise NotImplementedError", "def decode(data): #@NoSelf", "def decode(self, value):\r\n pass", "def read(self, tag): # type: (Number) -> (Tag, any)\n if tag.type != Types.Primitive:\n raise ASN1Error('Only primitive types can be read.')\n\n nr = tag.number\n length = tag.length\n\n bytes_data = self._read_bytes(0, length)\n self._buffer = self._buffer[length:]\n if tag.cls != Classes.Universal:\n value = bytes_data\n elif nr == Numbers.Boolean:\n value = self._decode_boolean(bytes_data)\n elif nr in (Numbers.Integer, Numbers.Enumerated):\n value = self._decode_integer(bytes_data)\n elif nr == Numbers.Null:\n value = self._decode_null(bytes_data)\n elif nr == Numbers.ObjectIdentifier:\n value = self._decode_object_identifier(bytes_data)\n elif nr in (\n Numbers.PrintableString, Numbers.IA5String, Numbers.UTCTime\n ):\n value = bytes_data.decode('utf-8')\n else:\n value = bytes_data\n\n self._resetTag()\n return value", "def decode(self, code):\n raise NotImplementedError", "def decode(a):\n return decode(a)", "def m2i(self, pkt, s):\n diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,\n implicit_tag=self.implicit_tag,\n explicit_tag=self.explicit_tag,\n safe=self.flexible_tag)\n if diff_tag is not None:\n # this implies that flexible_tag was True\n if self.implicit_tag is not None:\n self.implicit_tag = diff_tag\n elif self.explicit_tag is not None:\n self.explicit_tag = diff_tag\n codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)\n if self.flexible_tag:\n return codec.safedec(s, context=self.context)\n else:\n return codec.dec(s, context=self.context)", "def decode(self, s):", "def decode(self, s):", "def decode(self,data):\n import yaml\n return yaml.load(data.decode('utf-8'))", "def decode_input_data(self, rawdata):\n return self.get_content_type().loads(rawdata, self)", "def decode(self, value):\r\n return value", "def decode_dynamic(self, encoded, global_params):\n raise NotImplementedError", "def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, algorithms='HS256')\n\n return payload", "def _dinamic_decode(self):\n raise NotImplementedError", "def decoder(self):\n pass", "def _decode(self, input_dict):\n pass", "def decode(self, encoded: str):\n if not isinstance(encoded, str) or not encoded:\n return None\n int_encoded = self._decode_str(encoded)\n if int_encoded is None:\n return None\n int_origin = self._int_obfuscator.decode(int_encoded)\n if int_origin is None:\n return None\n str_encoded = self.__encode(int_origin)\n return int_origin if str_encoded == encoded else None", "def from_word(word, extended=False):\n code, qc, pc, addr, p = instruction.decode(word)\n decoder = instruction.from_word_std if not extended else instruction.from_word_ext\n return decoder(code, qc, pc, addr)", "def decode(packed_data, custom_decoder=None):\n decoder = make_decoder(custom_decoder)\n structure = msgpack.unpackb(packed_data, ext_hook=decoder, encoding='utf-8')\n return structure", "def decode(encoded):\n if encoded is None:\n return None\n\n try:\n s = decode(APP.config['SECRET_KEY'], encoded)\n return json.loads(s)\n except Exception as err:\n LOGGER.error('Error decoding auth: %s' % str(err))\n raise err", "def test_decode(self):\n pass # 
TODO(tlarsen)", "def get_application_info( tree ):\n application_name = None\n # most machines store the machine name string in the tag 'ApplicationName'\n for application_name in tree.getroot().iter( 'ApplicationName' ):\n application_name = application_name.text\n break\n # NovaSeq stores the machine name string in the tag 'Application'\n if( application_name == None ):\n for application_name in tree.getroot().iter( 'Application' ):\n application_name = application_name.text\n break\n if( application_name == None ):\n raise ValueError( 'Unable to find Application* element in BCL RunParameters.xml' )\n\n application_version = None\n for application_version in tree.getroot().iter( 'ApplicationVersion' ):\n application_version = application_version.text\n break\n if( application_version == None ):\n raise ValueError( 'ApplicationVersion element missing in BCL RunParameters.xml' )\n\n re_models = '|'.join( application_name_dict.keys() )\n re_pattern = '(%s)' % re_models\n mobj = re.match( re_pattern, application_name )\n if( mobj == None ):\n raise ValueError( 'unrecognized ApplicationName in RunParameters.xml file' )\n instrument_model = application_name_dict[mobj.group( 1 )]\n\n # Distinguish between HiSeq models 3000 and 4000 using Andrew's(?) method.\n # Note: the p5 index orientations differ between these two models.\n if( instrument_model == 'HiSeq' ):\n application_major_version = int(application_version.split('.')[0])\n if application_major_version > 2:\n instrument_model = 'HiSeq4000'\n else:\n instrument_model = 'HiSeq3000'\n\n return( instrument_model, application_version )", "def decode(self, data: bytes) -> bytes:\n ...", "def decode(decode_format):\n return output_from_decode", "def _decode(self, rel_codes, anchors):\n pass", "def _decode_5104(data):\n\n text = []\n start_byte = 0\n while start_byte + 2 < len(data):\n tag = data[start_byte:start_byte + 2]\n if tag == b'#u':\n start_byte += 2\n text_size = struct.unpack(\n '<h', data[start_byte:start_byte + 2])[0]\n start_byte += 2\n text.append(data[start_byte:start_byte + text_size].decode('utf8'))\n start_byte += text_size\n start_byte += 6\n elif tag == b'$u':\n start_byte += 2\n text.append(struct.unpack(\n '<h', data[start_byte:start_byte + 2])[0])\n start_byte += 2\n start_byte += 6\n elif tag == b',u':\n start_byte += 2\n text.append(struct.unpack(\n '<h', data[start_byte:start_byte + 2])[0])\n start_byte += 2\n else:\n start_byte += 1\n\n return {'analyst': text[0],\n 'date': text[2],\n 'image_name': text[4],\n 'instrument_model': text[5],\n 'instrument_serial_number': text[6],\n 'instrument_software_version': text[7],\n 'accumulations': text[9],\n 'detector': text[11],\n 'source': text[12],\n 'beam_splitter': text[13],\n 'apodization': text[15],\n 'spectrum_type': text[16],\n 'beam_type': text[17],\n 'phase_correction': text[20],\n 'ir_accessory': text[26],\n 'igram_type': text[28],\n 'scan_direction': text[29],\n 'background_scans': text[32]}", "def decode_key(key):\n if '-tags=' in key:\n key_name, tags_json = key.split('-tags=')\n return key_name, json.loads(tags_json)\n return key, None" ]
[ "0.5753349", "0.54978716", "0.5208845", "0.5159719", "0.49766794", "0.49363422", "0.4895536", "0.48932958", "0.48754647", "0.486865", "0.486865", "0.4816334", "0.4798622", "0.47956207", "0.4790625", "0.47812086", "0.47764444", "0.477164", "0.47608012", "0.47092384", "0.469922", "0.4691742", "0.4662557", "0.46535254", "0.4647416", "0.46401456", "0.46214098", "0.4620335", "0.46105894", "0.46063402" ]
0.64384407
0
Write the cfg, bnd and all results in the working dir. prefix is a string that determines the names of the created files. If there is a conflict with existing files, the existing files will be replaced or not, depending on the value of the replace argument.
def save(self, prefix, replace=False): if not _check_prefix(prefix): return # Create the results directory try: os.makedirs(prefix) except OSError: if not replace: print('Error directory already exists: %s' % prefix, file=stderr) return elif prefix.startswith('rpl_'): shutil.rmtree(prefix) os.makedirs(prefix) else: print('Error only directries begining with "rpl_" can be' 'replaced', file=stderr) return # Moves all the files into it shutil.copy(self._bnd, prefix+'/%s.bnd' % os.path.basename(prefix)) shutil.copy(self._cfg, prefix+'/%s.cfg' % os.path.basename(prefix)) maboss_files = filter(lambda x: x.startswith(self.prefix), os.listdir(self._path)) for f in maboss_files: shutil.copy(self._path + '/' + f, prefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, basedir, outdir, name, prefix=None):", "def prefix(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n if args.prefix:\n config_json[\"prefix\"] = args.prefix\n with open(args.file, \"w\") as f:\n json.dump(config_json, f, indent=4)\n else:\n print(config_json[\"prefix\"])\n return 0", "def write(self, prefix, path=None):\n\n if path is None:\n path = os.getcwd()\n\n header, source = self.doprint(prefix=prefix)\n\n with open(os.path.join(path, prefix + '.h'), 'w') as f:\n f.write(header)\n\n with open(os.path.join(path, prefix + '.c'), 'w') as f:\n f.write(source)", "def configure(self, spec, prefix):\n options = getattr(self, \"configure_flag_args\", [])\n options += [\"--prefix={0}\".format(prefix)]\n options += self.configure_args()\n\n with working_dir(self.build_directory, create=True):\n inspect.getmodule(self).configure(*options)", "def _write_files(files, prefix=None, clobber=False):\n [_write_file(infile, prefix, clobber) for infile in files]", "def write(self, val, prefix, args=()):\n file_path = self.path(prefix, args)\n val_str = self.writer_(val)\n autofile.write_file(file_path, val_str)", "def build(self, spec, prefix):\n make()", "def main():\n \"\"\"Removes the common prefix from each filename.\"\"\"\n \"\"\"Writes a new file with the stripped filenames.\"\"\"\n parser = OptionParser(usage='%prog [options] infile outfile')\n parser.add_option('-f', '--force', action='store_true', default=False, help='overwrite current outfile, if exists')\n\n # check inputs\n options, args = parser.parse_args() \n if len(args) != 2: parser.error('wrong number of positional arguments') \n\n infile = args[0]\n outfile = args[1]\n\n if exists(outfile) and not(options.force): \n print >>sys.stderr, 'Target %s already exists.' % outfile\n print >>sys.stderr, 'Use --force to overwrite.'\n sys.exit(1)\n\n if not(exists(infile)):\n print >>sys.stderr, 'File %s not found.' 
% infile \n sys.exit(1)\n\n infieldnames = ['filename', 'procname', 'lineno'] \n outfieldnames = ['filename', 'lineno']\n\n # read file\n instream = open(infile)\n reader = DictReader(instream, fieldnames=infieldnames)\n entries = list(reader) \n instream.close()\n\n # process entries\n fnames = map(lambda d: d['filename'], entries) \n prefix = commonprefix(fnames)\n\n # if there is only one file, the common prefix will include the filename \n # however, in the output we want to preserve the filename\n prefix, tail = split(prefix)\n\n for e in entries: \n tails = e['filename'].split(prefix) \n if not(tails[0] == ''): \n print >>sys.stderr, 'This prefix is uncommon!'\n sys.exit(1) \n e['filename'] = (tails[1].split('/'))[1] \n\n # print results\n outstream = open(outfile, 'w')\n writer = DictWriter(outstream, outfieldnames, extrasaction='ignore', lineterminator='\\n')\n writer.writerows(entries)\n outstream.close()", "def output_nupack(self, prefix, outfile):\n if prefix:\n outfile.write(\"#\\n## Component %s\\n\" % prefix[:-1])\n else:\n outfile.write(\"#\\n## Top Component\\n\")\n \n used_seqs = set()\n \n # Define structures\n for struct in list(self.structs.values()):\n outfile.write(\"structure %s = %s\\n\" % (struct.full_name, struct.struct))\n # Add all sequences in this structure to set of used sequences.\n used_seqs.update([ x for x in struct.base_seqs if not x.reversed] + \\\n [~x for x in struct.base_seqs if x.reversed])\n \n # Define sequences\n for seq in list(self.base_seqs.values()):\n self.assertTrue(isinstance(seq, Sequence), \"Expected Sequence object instead of %r\" % seq)\n if seq not in used_seqs:\n warning(\"Sequence %s is defined, but never used in a structure. It may not be designed.\" % seq.full_name)\n \n if not seq.dummy:\n outfile.write(\"sequence %s = %s\\n\" % (seq.full_name, seq.const))\n \n # Apply sequences to structures and set objective function\n for struct in list(self.structs.values()):\n seqs = ' '.join([seq.full_name for seq in struct.base_seqs if not seq.dummy])\n outfile.write(\"%s : %s\\n\" % (struct.full_name, seqs))\n if struct.opt: # Optimization parameter\n outfile.write(\"%s < %f\\n\" % (struct.full_name, struct.opt))", "def write_data(self, file_prefix, **kwargs):\n # Add a dot to separate the prefix from the population label if it\n # doesn't already have one and isn't a directory\n if (not os.path.isdir(file_prefix) and\n not file_prefix.endswith('.') and\n not file_prefix.endswith(os.path.sep)):\n file_prefix += '.'\n for comp_array in self.component_arrays.values():\n # @UndefinedVariable\n comp_array.write_data(file_prefix + comp_array.name + '.pkl',\n **kwargs)", "def handle_dir(dr, start_from, global_name, base_args, exp_names, exp_qtip_args, exp_aligner_args, targets, submit_fh,\n use_scavenger=False, wet=False, base_mem_gb=6, base_hours=3):\n for name, ar, al_ar in zip(exp_names, exp_qtip_args, exp_aligner_args):\n nm = '.'.join([global_name, name])\n new_makefile_base = '.'.join(['Makefile', global_name, name])\n logging.info(' Creating new Makefile: %s' % join(dr, new_makefile_base))\n with open(join(dr, new_makefile_base), 'w') as mk_out:\n for ln in open(join(dr, 'Makefile')):\n # 2 things to do: change the args passed to qtip and change the .out target names\n if ln.startswith('MK_QTIP_ARGS'):\n mk_out.write('MK_QTIP_ARGS=%s %s\\n' % (' '.join(base_args), ' '.join(ar)))\n elif ln.startswith('MK_ALIGNER_ARGS'):\n mk_out.write('MK_ALIGNER_ARGS=%s\\n' % (' '.join(al_ar)))\n elif ln.startswith('NCORES='):\n 
mk_out.write('NCORES=1\\n')\n else:\n mk_out.write(ln.replace('.out', '.%s.out' % nm).replace(',out', ',%s.out' % nm))\n for fulltarget in targets:\n targdir, rule = fulltarget.split('/')\n if targdir != dr:\n continue\n orig_rule = rule\n rule = rule.replace('.out', '.%s.out' % nm)\n if os.path.exists(join(dr, rule)) and os.path.exists(join(dr, rule, 'DONE')):\n logging.info(' Skipping: %s/%s because DONE exists' % (dr, rule))\n continue\n logging.info(' Adding job to make target: %s/%s' % (dr, rule))\n if start_from == 'inputalign':\n dest_dir = join(dr, rule)\n src_dir = join(dr, orig_rule)\n logging.info(' Copying from original dir %s' % src_dir)\n mkdir_quiet(dest_dir)\n assert os.path.exists(src_dir)\n assert os.path.exists(join(src_dir, 'input.sam'))\n logging.info(' Copying %s to new target dir' % (join(src_dir, 'input.sam')))\n shutil.copy(join(src_dir, 'input.sam'), dest_dir)\n assert os.path.exists(join(dest_dir, 'input.sam'))\n fn = '.' + rule + '.sh'\n write_slurm(rule, fn, dr, base_mem_gb, base_hours,\n makefile=new_makefile_base,\n use_scavenger=use_scavenger,\n ncores=1)\n cmd = 'pushd %s && sbatch %s && popd' % (dr, fn)\n submit_fh.write(cmd + '\\n')\n if wet:\n os.system(cmd)", "def _split_along_prefix(self,\r\n input_fp,\r\n params,\r\n jobs_to_start,\r\n job_prefix,\r\n output_dir):\r\n out_files = []\r\n buffered_handles = {}\r\n prefix_length = params['prefix_length'] or 1\r\n for seq_id, seq in parse_fasta(open(input_fp)):\r\n\r\n if(len(seq) < prefix_length):\r\n raise ValueError(\"Prefix length must be equal or longer than sequence.\\n\"\r\n + \" Found seq %s with length %d\" % (seq_id, len(seq)))\r\n prefix = seq[:prefix_length]\r\n\r\n if (prefix not in buffered_handles):\r\n # never seen this prefix before\r\n out_fp = \"%s/%s%s\" % (output_dir, job_prefix, prefix)\r\n buffered_handles[prefix] = BufferedWriter(out_fp)\r\n out_files.append(out_fp)\r\n self.prefix_counts[prefix] = 0\r\n\r\n self.prefix_counts[prefix] += 1\r\n buffered_handles[prefix].write('>%s\\n%s\\n' % (seq_id, seq))\r\n\r\n # make sure all buffers are closed and flushed\r\n for buf_fh in buffered_handles.itervalues():\r\n buf_fh.close()\r\n\r\n remove_files = True\r\n return out_files, remove_files", "def write_files(data_dir, prefix, data):\n qf = open(os.path.join(data_dir, '%s.queries.txt'%prefix), 'w')\n gf = open(os.path.join(data_dir, '%s.gold.txt'%prefix), 'w')\n\n for q, g in data:\n qf.write(q)\n gf.write(g)\n\n qf.close()\n gf.close()", "def _add_scripts(prefix):\n mapping = {\"MAST_HOME\": prefix}\n if \"Windows\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"windows\")\n files = [\n \"mast.bat\",\n \"mast-system.bat\",\n \"mast-accounts.bat\",\n \"mast-backups.bat\",\n \"mast-crypto.bat\",\n \"mast-deployment.bat\",\n \"mast-developer.bat\",\n \"mast-network.bat\",\n \"test-mast.bat\",\n \"mast-version.bat\",\n \"mast-web.bat\",\n \"mastd.bat\",\n \"mast-ssh.bat\",\n \"set-env.bat\",\n ]\n elif \"Linux\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"linux\")\n files = [\n \"mast\",\n \"mast-system\",\n \"mast-accounts\",\n \"mast-backups\",\n \"mast-crypto\",\n \"mast-deployment\",\n \"mast-developer\",\n \"mast-network\",\n \"test-mast\",\n \"mast-version\",\n \"mast-web\",\n \"mast-ssh\",\n \"mastd\",\n \"set-env\",\n ]\n\n for f in files:\n dst = os.path.join(prefix, f)\n src = os.path.join(script_dir, f)\n print(\"{} -> {}\".format(src, dst))\n content = render_template_file(src, mapping)\n write_file(dst, content)\n 
if \"Linux\" in platform.system():\n os.chmod(dst, 0o755)\n\n if \"Windows\" in platform.system():\n # copy python27.dll to site-packages/win32 directory to get around\n # issue when starting mastd\n src = os.path.join(prefix, \"miniconda\", \"python27.dll\")\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n \"python27.dll\"\n )\n copyfile(src, dst)\n for filename in [\"pythoncom27.dll\", \"pythoncomloader27.dll\", \"pywintypes27.dll\"]:\n src = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"pywin32_system32\",\n filename,\n )\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n filename,\n )\n copyfile(src, dst)\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"bin\"),\n os.path.join(prefix, \"bin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"etc\"),\n os.path.join(prefix, \"etc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"var\"),\n os.path.join(prefix, \"var\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"usrbin\"),\n os.path.join(prefix, \"usrbin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"tmp\"),\n os.path.join(prefix, \"tmp\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"doc\"),\n os.path.join(prefix, \"doc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"contrib\"),\n os.path.join(prefix, \"contrib\")\n )", "def write_psts(self,prefix,existing_jco=None,noptmax=None):\n self.log(\"writing realized pest control files\")\n # get a copy of the pest control file\n pst = self.pst.get(par_names=self.pst.par_names,obs_names=self.pst.obs_names)\n\n if noptmax is not None:\n pst.control_data.noptmax = noptmax\n pst.control_data.noptmax = noptmax\n\n if existing_jco is not None:\n pst.pestpp_options[\"BASE_JACOBIAN\"] = existing_jco\n\n # set the indices\n pst.parameter_data.index = pst.parameter_data.parnme\n pst.observation_data.index = pst.observation_data.obsnme\n\n if self.parensemble.istransformed:\n par_en = self.parensemble._back_transform(inplace=False)\n else:\n par_en = self.parensemble\n\n for i in range(self.num_reals):\n pst_name = prefix + \"{0:d}.pst\".format(i)\n self.log(\"writing realized pest control file \" + pst_name)\n pst.parameter_data.loc[par_en.columns,\"parval1\"] = par_en.iloc[i, :].T\n\n # reset the regularization\n #if pst.control_data.pestmode == \"regularization\":\n #pst.zero_order_tikhonov(parbounds=True)\n #zero_order_tikhonov(pst,parbounds=True)\n # add the obs noise realization if needed\n if self.obsensemble.shape[0] == self.num_reals:\n pst.observation_data.loc[self.obsensemble.columns,\"obsval\"] = \\\n self.obsensemble.iloc[i, :].T\n\n # write\n pst.write(pst_name)\n self.log(\"writing realized pest control file \" + pst_name)\n self.log(\"writing realized pest control files\")", "def output_file(ddir, file_prefix):\n name = autofile.name.output_file(file_prefix)\n return factory.DataFile(ddir=ddir, name=name)", "def directory2ChangeSpecificationFile(\n root, outputFile,\n outputprefix='* ', force=False):\n if os.path.exists(outputFile) and not force:\n raise ValueError('change specification file already exists: %s' % outputFile)\n with open(outputFile,\"w\") as output:\n for dir, subdirs, files in os.walk(root):\n relativeDir = os.path.relpath(dir, root)\n if dir != root: # this test is to avoid having \".\" in the list\n output.write(\"%s%s\\n\" % (outputprefix, relativeDir))\n for file in files:\n output.write(\"%s%s\\n\" % (outputprefix, 
os.path.normpath(os.path.join(relativeDir, file))))", "def make_json(prefix, input_dir):\n # get list of files\n file_list = os.listdir(input_dir)\n # set reference sequence\n tracklist = {'formatVersion': 1,\n 'refSeqs': '%s.ref.fa.fai' % prefix,\n 'tracks': []}\n # add reference sequence track to tracklist.json\n tracklist['tracks'].append({\"category\": \"Reference sequence\",\n \"key\": \"Reference sequence\",\n \"label\": \"Reference sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.ref.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n tracklist['tracks'].append({\"category\": \"Consensus sequence\",\n \"key\": \"Consensus sequence\",\n \"label\": \"Consensus sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.cons.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n # add bigwig track to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Coverage\",\n \"label\": \"Coverage\",\n \"type\": \"JBrowse/View/Track/Wiggle/XYPlot\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BigWig\",\n \"autoscale\": \"local\",\n \"urlTemplate\": \"%s.sorted.bw\" % prefix\n })\n # add BAM Sequence Coverage to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (SNPs/Coverage)\",\n \"label\": \"Sequence reads (SNPs/Coverage)\",\n \"type\": \"JBrowse/View/Track/SNPCoverage\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add BAM Sequence Alignments to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (Alignment)\",\n \"label\": \"Sequence reads (Alignment)\",\n \"type\": \"JBrowse/View/Track/Alignments2\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n # add bigwig histogram option\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add GFF3 file to trackList.json\n tracklist['tracks'].append({\"category\": \"Annotation\",\n \"key\": \"Genbank annotation\",\n \"label\": \"Genbank annotation\",\n \"type\": \"JBrowse/View/Track/CanvasFeatures\",\n \"urlTemplate\": \"%s.gff3.gz\" % prefix,\n \"style\": {\n \"_defaultHistScale\": 4,\n \"_defaultLabelScale\": 30,\n \"_defaultDescriptionScale\": 120,\n # Comma-separated list of case-insensitive feature tags to use\n # for showing the feature's label.\n # The first one found will be used. Default 'name,id'.\n \"label\": \"product,id\",\n # style→description\tComma-separated list of case-insensitive\n # feature tags to check for the feature's long description.\n # The first one found will be used. 
Default 'note,description'.\n # If blank no description is used.\n \"description\": \"note, description\"\n },\n })\n\n json_path = os.path.join(input_dir, 'trackList.json')\n with open(json_path, 'wt') as output_handle:\n json_raw_str = json.dumps(tracklist, indent=4)\n output_handle.write(json_raw_str)\n return 'trackList.json'", "def output_prefix(wildcards, output):\n return RESULTS_DIR + \\\n f\"/{wildcards.method}-{wildcards.N}-{wildcards.n_proc}\"", "def replace_gen(self):\r\n current_path = os.path.join(self.settings.save_path, 'current.json')\r\n current_folder_path = os.path.join(self.settings.save_path, 'current')\r\n history_path = os.path.join(self.settings.save_path, 'history')\r\n archive_folder_path = os.path.join(history_path, f'gen{self.generation}')\r\n archive_path = os.path.join(archive_folder_path, 'current') # no ending allowed\r\n archive_json_path = os.path.join(archive_folder_path, 'current.json')\r\n\r\n\r\n if not os.path.exists(current_path):\r\n raise FileNotFoundError\r\n if not os.path.exists(current_folder_path):\r\n raise FileNotFoundError\r\n\r\n os.makedirs(history_path, exist_ok=True)\r\n os.makedirs(archive_folder_path)\r\n\r\n cwd = os.getcwd()\r\n shutil.make_archive(archive_path, 'zip', current_folder_path)\r\n os.chdir(cwd)\r\n shutil.rmtree(current_folder_path, onerror=_ignore_del_dir_failure)\r\n os.chdir(cwd)\r\n\r\n os.rename(current_path, archive_json_path)", "def merge_qpoints(self, files_to_merge, out_prefix, cwd=None):\n # We work with absolute paths.\n files_to_merge = [os.path.abspath(s) for s in list_strings(files_to_merge)]\n nfiles = len(files_to_merge)\n\n if self.verbose:\n print(\"Will merge %d files with output_prefix %s\" % (nfiles, out_prefix))\n for (i, f) in enumerate(files_to_merge):\n print(\" [%d] %s\" % (i, f))\n\n if nfiles == 1:\n raise self.Error(\"merge_qpoints does not support nfiles == 1\")\n\n self.stdin_fname, self.stdout_fname, self.stderr_fname = (\n \"mrgscr.stdin\", \"mrgscr.stdout\", \"mrgscr.stderr\")\n\n if cwd is not None:\n self.stdin_fname, self.stdout_fname, self.stderr_fname = \\\n map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])\n\n inp = StringIO.StringIO()\n\n inp.write(str(nfiles) + \"\\n\") # Number of files to merge.\n inp.write(out_prefix + \"\\n\") # Prefix for the final output file:\n\n for filename in files_to_merge:\n inp.write(filename + \"\\n\") # List with the files to merge.\n\n inp.write(\"1\\n\") # Option for merging q-points.\n\n inp.seek(0)\n self.stdin_data = [s for s in inp]\n\n with open(self.stdin_fname, \"w\") as fh:\n fh.writelines(self.stdin_data)\n\n try:\n self.execute(cwd=cwd)\n except self.Error:\n raise", "def create(self, prefix, args=()):\n assert os.path.isdir(prefix)\n assert not self.exists(prefix, args)\n dir_path = self.path(prefix, args)\n os.makedirs(dir_path)\n\n self.creation_side_effect_(prefix, args)", "def merge_root_files(self, force=False):\n self.OutFilePath.parent.mkdir(exist_ok=True)\n cmd = f'hadd{\" -f\" if force else \"\"} {self.proteus_raw_file_path()} {self.Raw.OutFilePath} {self.Ref.OutFilePath} {self.Adc2Vcal.OutFilePath}'\n pinfo(cmd)\n check_call(cmd, shell=True)", "def _write_file(inobject, prefix=None, clobber=False):\n local_write = 'writeto'\n local_write_func = getattr(inobject, local_write)\n filename = os.path.basename(inobject.filename())\n if prefix:\n filename = ''.join([prefix, filename])\n local_write_func(filename, clobber=clobber)\n return", "def install(self, spec, prefix):\n make(\"install\", 
parallel=False)", "def _out(self, *args):\n suffix = '_'.join(map(str, args))\n return os.path.join(self._out_folder, suffix )", "def add_prefix_to_records(\n self, prefix: str, output_file: Path = None, point_to_new_file: bool = True\n ) -> None:\n if output_file is None:\n output_file = (\n Path(self._input_file.parent)\n / f\"{self._input_file.stem}_prefixed{self._input_file.suffix}\"\n )\n else:\n output_file = Path(output_file)\n fasta = pyfastx.Fasta(\n self.file_path.as_posix(), build_index=False, full_name=True\n )\n prefix = prefix.strip(\"_\")\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n for record_name, record_seq in fasta:\n outfile.write(f\">{prefix}_{record_name}\\n{record_seq}\\n\")\n if point_to_new_file:\n self.file_path = output_file", "def write_config(file_prefix=None, dir=None, date=None):\n\n # Print out.\n print(\"\\nCreating the OpenDX .cfg program configuration file.\")\n\n # Open the file.\n file = open_write_file(file_name=file_prefix+\".cfg\", dir=dir, force=True)\n\n # Generate the text.\n file.write(\"//\\n\")\n file.write(\"//\\n\")\n file.write(\"// time: %s\\n\" % date)\n file.write(\"//\\n\")\n file.write(\"// version: 3.2.0 (format), 4.3.2 (DX)\\n\")\n file.write(\"//\\n\")\n file.write(\"//\\n\")\n file.write(\"// panel[0]: position = (0.0164,0.0000), size = 0.2521x0.1933, startup = 1, devstyle = 1\\n\")\n file.write(\"// title: value = Control Panel\\n\")\n file.write(\"//\\n\")\n file.write(\"// workspace: width = 251, height = 142\\n\")\n file.write(\"// layout: snap = 0, width = 50, height = 50, align = NN\\n\")\n file.write(\"//\\n\")\n file.write(\"// interactor Selector[1]: num_components = 1, value = 1 \\n\")\n file.write(\"// selections: maximum = 2, current = 0 \\n\")\n file.write(\"// option[0]: name = \\\"Colour\\\", value = 1\\n\")\n file.write(\"// option[1]: name = \\\"Grey\\\", value = 2\\n\")\n file.write(\"// instance: panel = 0, x = 81, y = 6, style = Scrolled List, vertical = 1, size = 170x136\\n\")\n file.write(\"// label: value = Colour Selector\\n\")\n file.write(\"//\\n\")\n file.write(\"// node Image[3]:\\n\")\n file.write(\"// title: value = Surface\\n\")\n file.write(\"// depth: value = 24\\n\")\n file.write(\"// window: position = (0.0000,0.0400), size = 0.9929x0.9276\\n\")\n\n # Close the file.\n file.close()", "def output_filename(self, prefix, suffix):\n filename = \"%s%s%s\" % (prefix, _ExecutionWrapper._file_index, suffix)\n _ExecutionWrapper._file_index += 1\n return filename", "def meson(self, spec, prefix):\n configure(\"--prefix=\" + prefix, *self.configure_args())" ]
[ "0.635768", "0.586553", "0.54163074", "0.53897345", "0.53673553", "0.5315337", "0.5300036", "0.5274199", "0.5262346", "0.52035856", "0.5146001", "0.5133099", "0.51283026", "0.51201814", "0.51081294", "0.509893", "0.50881577", "0.5049526", "0.50436527", "0.49555004", "0.49197236", "0.48888025", "0.48823187", "0.48484993", "0.48303118", "0.4824894", "0.4805217", "0.48045924", "0.47799647", "0.47718546" ]
0.7196396
0
Load the chatbots that were created with the MetaChatBot and put them in the chatbot list variable.
def loadChatbots(self): pathChatbots = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) # ruta donde se guardarán los chatbots listAllChatbots = os.listdir(pathChatbots) # lista de chatbots en la ruta if len(listAllChatbots) == len(self.listNoChatbots): # si son iguales es que no hay más chatbots que los que están por defecto self.output.exec('No hay chatbots para cargar.') else: currentChatbotLoaded = False # variable para establecer que ya hay un chatbot actual for nameChatbot in listAllChatbots: if not nameChatbot in self.listNoChatbots: pathJson = os.path.join(os.path.sep, pathChatbots, nameChatbot,nameChatbot+'.json') # path del json del chatbot if os.path.isfile(pathJson): chatbot = CStructureChatBot() # objeto chatbot with open(pathJson, 'r', encoding='utf-8') as json_data: dictChatBot = json.load(json_data) # carga el json nameWithoutTranform = list(dictChatBot.keys())[0] # se obtiene el nombre del chatbot chatbot.setName(nameWithoutTranform) intents = dictChatBot[nameWithoutTranform] # guarda las intenciones del chatbot chatbot.codeToStructureChatbot(chatbot, intents) # convierte el json en un chatbot chatbot.nameTransformed = nameChatbot # guarda el nombre del chatbot sin caracteres especiales self.dictChatBots[nameWithoutTranform] = chatbot # se añade el chatbot como esté en el JSON if not currentChatbotLoaded : self.currentStructureChatBot =chatbot # se establece el primer chatbot como chatbot actual currentChatbotLoaded = True # se cambia el boleano self.output.exec('Ahora el chatbot actual es "'+self.currentStructureChatBot.name+'".')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bots():\n return get_bots(\"botsunlimited.settings\")", "async def bot_list(self) -> list:\n return await self._do_request(\"get\", botlist_address, self._user_auth)", "def list(self):\n\n result = []\n for i in self.bots:\n result.append(i.name)\n return result", "def bots(self):\n json = self.skype.conn(\"GET\", \"{0}/agents\".format(SkypeConnection.API_BOT),\n auth=SkypeConnection.Auth.SkypeToken).json().get(\"agentDescriptions\", [])\n return [self.merge(SkypeBotUser.fromRaw(self.skype, raw)) for raw in json]", "def list(self) -> miscellaneous.List[entities.Bot]:\n success, response = self._client_api.gen_request(req_type='get',\n path='/projects/{}/bots'.format(self.project.id))\n\n if success:\n bots_json = response.json()\n pool = self._client_api.thread_pools(pool_name='entity.create')\n jobs = [None for _ in range(len(bots_json))]\n # return triggers list\n for i_bot, bot in enumerate(bots_json):\n jobs[i_bot] = pool.apply_async(entities.Bot._protected_from_json,\n kwds={'project': self.project,\n 'bots': self,\n 'client_api': self._client_api,\n '_json': bot})\n # wait for all jobs\n _ = [j.wait() for j in jobs]\n # get all results\n results = [j.get() for j in jobs]\n # log errors\n _ = [logger.warning(r[1]) for r in results if r[0] is False]\n # return good jobs\n bots = miscellaneous.List([r[1] for r in results if r[0] is True])\n else:\n logger.error('Platform error getting bots')\n raise exceptions.PlatformException(response)\n return bots", "def getBottoms(self):\n\t\treturn self.bottoms", "def list_atms(bot, update, chat_data):\n\n\tchat_id = update.message.chat_id\n\tchat_data[chat_id] = {'command': update.message.text, 'location':{}}\n\treply_markup = telegram.ReplyKeyboardMarkup([[telegram.KeyboardButton('Enviar Ubicación', request_location=True)]])\n\tbot.sendMessage(chat_id, 'Por favor, envie su ubicación', reply_markup=reply_markup)", "def list_bots():\n bot_count = len(BOT_STATES)\n idle = []\n ready = []\n busy = []\n \n for addr in BOT_STATES.keys():\n if BOT_STATES[addr] == 0:\n idle.append(addr)\n elif BOT_STATES[addr] == 1:\n ready.append(addr)\n elif BOT_STATES[addr] == 2:\n busy.append(addr)\n\n print(\"IDLE ({})\\n{}\".format(len(idle), idle))\n print(\"READY ({})\\n{}\".format(len(ready), ready))\n print(\"BUSY ({})\\n{}\".format(len(busy), busy))", "def get_all_bots(self):\n\t\ttry:\n\t\t\tconn \t\t\t = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\t\tconn.row_factory = sqlite3.Row\n\t\t\tc \t\t\t\t = conn.cursor()\n\t\t\tc.execute('SELECT * FROM bots')\n\t\t\tall_bots = c.fetchall()\n\t\t\treturn all_bots\t\t\t\t\t\t\t\t# list(all_bots) = [<sqlite3.Row object at 0x000001BB27302FD0>, <sqlite3.Row object at 0x000001BB27302CB0>,...]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# [{pair:'LTCBTC', 'is_active'=True, ...}, {pair:'ETHBTC, 'is_active'=True, ...}]\n\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False", "async def list(self, ctx):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n message = []\n message.append(\"```\\n\")\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n if len(self.twitch_streams) > 0:\n for stream in self.twitch_streams:\n message.append(stream[\"NAME\"] + \"\\n\")\n else:\n message.append(\"No streams found!\")\n message.append(\"```\")\n output = ''.join(message)\n await self.bot.say(output)\n else:\n await 
self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "def printStructureChatbotDict(self):\n if self.dictChatBots == {}:\n self.output.exec('No hay chatbots creados.')\n else:\n result = \", \".join(str(value.name) for key, value in self.dictChatBots.items()) # une los nombres de los chatbots\n self.output.exec('Los chatbot creados son: '+ result)", "def load_test_bots(self):\n records = [\n ('0.0.0.0', self.create_past_date(5), self.create_past_date(4), self.create_past_date(3), 32, 'A status message'),\n ('0.0.0.1', self.create_past_date(0), self.create_past_date(0), self.create_past_date(0), 32, 'A status message'),\n ('0.0.0.2', self.create_past_date(7), self.create_past_date(6), self.create_past_date(5), 32, 'A status message'),\n ]\n for record in records:\n self.db_mgr.exec_cmd('''insert into bot_status (ip, last_startup_time, \n last_activity_time, last_shutdown_time,\n port, message) VALUES (%s, %s, %s, %s, %s, %s)''',\n *record)\n return [r[0] for r in records]", "def _bots_initialization(self) -> None:\n\n assert isinstance(self.bots_configs, list), f\"Incorrect bot_farm config file. bot_farm_config['bots'] \" \\\n f\"must be dict, but now: {type(self.bots_configs)}\"\n\n for bot_config in self.bots_configs:\n self.bots.append(Bot(bot_config))", "async def _mafia_chat(self, ctx: Context, *mafias: discord.Member):\n\n guild: discord.Guild = ctx.guild\n\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n )\n }\n\n for user in mafias:\n overwrites[user] = discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True\n )\n\n channel = await guild.create_text_channel(\n \"mafia-chat\", overwrites=overwrites\n )\n\n await ctx.send(_(\"Created {}!\").format(channel.mention))", "def create_chitchat_bot(self):\n\n # Hint: you might want to create and train chatterbot.ChatBot here.\n # It could be done by creating ChatBot with the *trainer* parameter equals \n # \"chatterbot.trainers.ChatterBotCorpusTrainer\"\n # and then calling *train* function with \"chatterbot.corpus.english\" param\n \n ########################\n #### YOUR CODE HERE ####\n ########################\n self.chatbot = ChatBot(\n 'skoochbot',\n trainer='chatterbot.trainers.ChatterBotCorpusTrainer'\n )\n \n self.chatbot.train(\"chatterbot.corpus.english\")\n print(\"Extra training...\")\n \n self.chatbot.set_trainer(ListTrainer)\n \n self.chatbot.train([\n \"Hello\",\n \"Hello. How are you?\",\n \"I am well.\",\n \"Good to hear\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"How are you?\",\n \"I am well. How are you?\",\n \"I am also well.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Your momma\",\n \"is so fat they gave her her own zipcode.\",\n ])\n self.chatbot.train([\n \"How are you doing?\",\n \"I am well. How are you?\",\n \"I am also well.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"What's your name?\",\n \"My name is skoochbot. What is yours?\",\n \"That is my name too!\",\n \"Really?\",\n \"No.\",\n \"Yes.\",\n \"No.\",\n \"Yes.\",\n \"So, what can I help you with today?\",\n ])\n self.chatbot.train([\n \"No\",\n \"Yes\",\n \"No\",\n \"Yes it does\",\n \"No it doesn't\",\n \"Yes it does\",\n \"So, what can I help you with today?\",\n ])\n self.chatbot.train([\n \"What is your name?\",\n \"My name is skoochbot. 
What is yours?\",\n \"That's a nice name.\",\n \"Thank you.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Fuck you\",\n \"No, fuck you buddy\",\n \"You suck\",\n \"No, you suck\",\n \"No, you suck more\",\n \"I hate you so much\",\n \"I hate you too\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Where are you?\",\n \"At your momma's house.\",\n \"Where do you live?\",\n \"Your momma's house.\",\n \"Where are you from?\",\n \"Somewhere over the rainbow.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Who are you?\",\n \"I'm your worst nightmare.\",\n \"You can run but you can't hide, bitch.\",\n \"This is a dead parrot.\",\n \"It's just sleeping.\",\n \"Well you better wake him up then, hadn't you?\",\n \"So, what can I help you with today?\",\n \"This parrot is dead.\",\n \"No, it's just taking a little nap.\"\n ])\n self.chatbot.train([\n \"I'm squanching here!\",\n \"Sorry carry on.\",\n \"Thank you for the privacy.\",\n \"You are welcome.\",\n \"Let's get schwifty.\",\n \"Let's do it up in here.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"How are you?\",\n \"I am good\",\n \"That is good to hear.\",\n \"Thank you.\",\n \"You are welcome.\",\n \"So, what can I help you with today?\",\n \"What is AI?\",\n \"Your momma.\",\n \"What are your hobbies?\",\n \"Your momma and AI.\",\n \"What's your hobby?\",\n \"Your momma and AI. What is your hobby?\",\n \"What is your hobby?\",\n \"Your momma.\"\n ])\n self.chatbot.train([\n \"WHAT DO YOU WANT?\",\n \"Well, I was told outside that.\",\n \"Don't give me that, you snotty-faced heap of parrot droppings!\",\n \"What?\",\n \"Shut your festering gob, you tit! Your type really makes me puke, you vacuous, coffee-nosed, malodorous, pervert!!!\",\n \"Look, I CAME HERE FOR AN ARGUMENT\",\n \"OH, oh I'm sorry, but this is abuse.\",\n ])\n self.chatbot.train([\n \"Is this the right room for an argument?\",\n \"I told you once.\",\n \"No you haven't.\",\n \"Yes I have.\",\n \"When?\",\n \"Just now\",\n \"No you didn't\",\n \"Yes I did\",\n \"You didn't\",\n \"I'm telling you I did\",\n \"Oh, I'm sorry, just one moment. Is this a five minute argument or the full half hour?\",\n \"Oh look, this isn't an argument.\",\n \"Yes it is\",\n \"No, it's just contradiction.\"\n \"No it isn't.\",\n \"Yes it is.\"\n ])", "def _list(self, irc, msg, args):\n # TODO: write _list; use local.punny modules print/list if avail\n pass", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. 
The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "async def sponsors(self, ctx):\n resp = await self.bot.session.get(\n \"https://raw.githubusercontent.com/kyb3r/modmail/master/SPONSORS.json\"\n )\n data = loads(await resp.text())\n\n embeds = []\n\n for elem in data:\n embed = Embed.from_dict(elem[\"embed\"])\n embeds.append(embed)\n\n random.shuffle(embeds)\n\n session = EmbedPaginatorSession(ctx, *embeds)\n await session.run()", "def getTargetRobots(self):\n # self.log(\"find targets\")\n robots = self.get_visible_robots()\n enemyRobots = []\n if len(robots) > 0:\n for bot in robots:\n # self.log(\"target bot team \" + str(bot['team']))\n # self.log(\"my team \" + str(self.me['team']))\n if bot['team'] != self.me['team']:\n self.log(\"adding bot to enemy list\")\n enemyRobots.append(bot)\n return enemyRobots", "def create_chitchat_bot(self):\n \n self.chatbot = ChatBot(\"Fresher's Friend\")\n self.chatbot.trainer2=ListTrainer(self.chatbot)\n self.chatbot.trainer=ChatterBotCorpusTrainer(self.chatbot)\n self.chatbot.trainer.train(\"chatterbot.corpus.english.greetings\")\n #self.chatbot.trainer.train(\"chatterbot.corpus.english.conversations\")\n for filename in filenames: \n self.chatbot.trainer2.train(files[filename])", "async def botinfo(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n return await r(ctx, 'Not a bot.')\n\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n \n e = discord.Embed(\n title=f'Available bot info for {bot}',\n color=0xfecdea,\n description=f\"**Short Bot Description:** (do `uwu desc [bot]` for big description)\\n\\n*{data['Small_desc']}*\"\n )\n\n if data[\"bot_status\"] == \"online\":\n status = '<:online:805576670353948702> Online'\n elif data[\"bot_status\"] == \"idle\":\n status = '<:idle:805855470778056725> Idle'\n elif data[\"bot_status\"] == \"offline\":\n status = '<:offline:805576352450871346> Offline'\n elif data[\"bot_status\"] == \"dnd\":\n status = '<:dnd:819964146317393990> Do Not Disturb'\n\n listed_at = datetime.datetime.strptime(data[\"list_date\"], '%Y-%m-%d')\n\n e.add_field(\n name='Owner:', value=f'**{data[\"owner_name\"]}**\\n({data[\"owner_id\"]})', inline=False)\n e.add_field(name='Tags:', value=', '.join(data[\"tops\"]))\n e.add_field(name='Vanity URL:', value=data[\"vanity_url\"]\n if data[\"vanity_url\"] != '' else 'No vanity URL set.', inline=False)\n e.add_field(name='Bot Status:', value=status)\n e.add_field(name='Invites:',\n value=f'[Bot Invite]({data[\"invite\"]})\\n[Bot Support Server](https://discord.gg/{data[\"discord\"]})', inline=False)\n e.add_field(name='Other Bot Info:', value=f'''\n **Prefix:** `{data[\"prefix\"]}`\n **Site:** {data[\"site\"] if data[\"site\"] != '' else \"No sites.\"}\n **Library:** {data[\"lib\"]}\n **Listed at:** {listed_at}\n **Server Count:** {data[\"servers\"] if data[\"servers\"] != 'None' else '*Not set up!*'}''', inline=False)\n e.set_thumbnail(url=f'https://cdn.discordapp.com/avatars/{data[\"id\"]}/{data[\"avatar\"]}')\n await em(ctx, embed=e)", "async def list(self, ctx):\n\n cursor = await db.execute(\"Select MessageID, TimeEnding, Members, ChannelID from Giveaway \"\n \"where GuildID = ? 
and Ended = ?\", (ctx.guild.id, False))\n result = await cursor.fetchall()\n\n for i, tup in enumerate(result):\n try:\n msg = await ctx.guild.get_channel(tup[3]).fetch_message(tup[0])\n tup = list(tup)\n tup[0] = msg\n result[i] = tup\n except:\n result.remove(tup)\n await db.execute(\"Delete from Giveaway where MessageID = ?\", (tup[0],))\n await db.commit()\n\n if not result:\n return await send_embed(ctx, \"No active giveaways on this server.\", negative=True)\n\n embeds = []\n fields = []\n\n for i, tup in enumerate(result, start=1):\n fields.append((str(tup[0].id),\n f\"Prize: {tup[0].embeds[0].author.name}\\n\"\n f\"{tup[2]} possible winners\\n\"\n f\"Ends at {datetime.utcfromtimestamp(tup[1]).strftime('%Y-%m-%d %H:%M:%S')}\"))\n\n if i % 10 == 0 or i == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n title=\"Active Giveaways\"\n )\n\n for field in fields:\n embed.add_field(name=field[0], value=field[1], inline=False)\n\n embeds.append(embed)\n fields = []\n\n await self.bot.paginate(ctx, embeds)", "def get_chatrooms(self):\n return list(self.chatrooms)", "def set_chatbot(self, chatbot):\n super(MultiLogicAdapter, self).set_chatbot(chatbot)\n\n for adapter in self.adapters:\n adapter.set_chatbot(chatbot)", "def get_all_chats(dialogs) -> list[Chat]:\n res = []\n for i, dialog in enumerate(dialogs):\n if dialog['chat']['type'] == \"private\":\n chat = Chat(dialog['chat'][\"id\"], dialog['chat']['first_name'], i)\n elif dialog['chat']['type'] == \"bot\":\n chat = Chat(dialog['chat'][\"id\"], dialog['chat']['username'], i)\n else:\n chat = Chat(dialog['chat'][\"id\"], dialog['chat']['title'], i)\n res.append(chat)\n return res", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)", "def send_command_to_all(self, comando, parametros=None):\n for usuario in self.clientes.copy():\n self.send_command(comando, usuario, parametros)", "async def poll(self) -> List[Message]:\n if not self._session:\n await self._create_session()\n \n res = await self._session.get(self._network.SERVER_ADDR + '/api/poll')\n obj = await res.json()\n self._network.connected_robots = obj['robots']\n ret = []\n for m in obj['messages']:\n ret.append(Message.from_dict(m))\n return ret", "def _get_chat_id_list():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('chat_id')", "def _ParseBotList(botlist, testfilter):\n bots = []\n if testfilter:\n for bot in itertools.chain.from_iterable(botspec.split(',')\n for botspec in botlist):\n tests = set()\n if ':' in bot:\n if bot.endswith(':compile'):\n tests |= set(['compile'])\n else:\n raise ValueError(\n 'Can\\'t use both --testfilter and --bot builder:test formats '\n 'at the same time')\n\n bots.append((bot, tests))\n else:\n for botspec in botlist:\n botname = botspec.split(':')[0]\n tests = set()\n if ':' in botspec:\n tests |= set(filter(None, botspec.split(':')[1].split(',')))\n bots.append((botname, tests))\n return bots" ]
[ "0.6440786", "0.6281717", "0.6269866", "0.60385484", "0.5908586", "0.5807652", "0.57034916", "0.56967723", "0.5637857", "0.5546249", "0.554186", "0.5504985", "0.54998374", "0.5493724", "0.54400545", "0.5419266", "0.54172266", "0.54100966", "0.5401008", "0.53980523", "0.5368053", "0.53566927", "0.5332932", "0.5327749", "0.53230584", "0.53142595", "0.53100324", "0.5285525", "0.52655095", "0.5247098" ]
0.6672791
0
Elimina un chatbot de la lista
def deleteStructureChatbotDict(self,sentence):
    if sentence in self.dictChatBots:
        del self.dictChatBots[sentence]
        if not(self.currentStructureChatBot is None) and sentence == self.currentStructureChatBot.name:
            self.currentStructureChatBot = None # se reestablece el chatbot actual
            self.output.exec('El ChatBot "'+sentence+'" ha dejado de ser el ChatBot actual.')
        self.output.exec('El ChatBot "' + sentence + '" se ha eliminado correctamente .')
    else:
        self.output.exec('El ChatBot "' + sentence + '" no existe .')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def del_memes(context):\n\n for meme_sent in MEMES_SENT:\n meme_sent[0].delete() # Delete the photo sent\n\n # for meme_sent in MEMES_SENT: # Edit the message sent\n # edited_text = f\"i sent a meme for {meme_sent[3]} but its deleted now. sent such a coolio meme and you missed it? lol better luck next time cow\"\n # lad_bot.edit_message_text(chat_id=meme_sent[2], message_id=meme_sent[1].message_id, text=edited_text)\n\n if MEMES_SENT:\n print(\"Memes deleted.\")\n MEMES_SENT.clear()", "def spiderbotDelete(spiderbotid):\n sclogic.spiderbotDelete(spiderbotid)", "def delete(self):\n for i, message in enumerate(self.owner.messages):\n if message == self.body:\n del self.owner.messages[i]\n break", "def delete_message(self,contato):\r\n #Open new chat on whatsapp web\r\n new_msg_button = self.driver.find_element_by_xpath(self.NEW_CHAT)\r\n new_msg_button.click()\r\n sleep(1)\r\n #Search the contact\r\n search_field = self.driver.find_element_by_xpath(self.SEARCH_CONTACT)\r\n search_field.click()\r\n search_field.send_keys(contato)\r\n sleep(1)\r\n #Click on the firts contact with the name that I told\r\n first_contact = self.driver.find_element_by_xpath(self.FIRST_CONTACT)\r\n first_contact.click()\r\n sleep(1.5)\r\n #open the dialog by clicking this button\r\n btn_option = self.driver.find_element_by_xpath(self.OPTION_BTN) \r\n hover = ActionChains(self.driver).move_to_element(btn_option)\r\n hover.perform()\r\n dialog_click = self.driver.find_element_by_xpath(self.DIALOG_OPTION)\r\n dialog_click.click()\r\n sleep(1.5)\r\n #Then select Delete message option by clicking on\r\n btn_option_delete = self.driver.find_element_by_xpath(self.OPTION_DELETE)\r\n btn_option_delete.click()\r\n sleep(1.5)\r\n #delete that message for everyone\r\n delete = self.driver.find_element_by_xpath(self.DELETE_BTN)\r\n delete.click()\r\n sleep(1)\r\n #Confirm the option\r\n #Criar opção para checar se aparece a caixa de diálogo\r\n try:\r\n self.driver.find_element_by_xpath(self.FIRST_DIALOG_BTN).click()\r\n except:\r\n pass", "async def remove_last_element(message: types.Message):\n commands.remove_last(message.chat.id)\n await message.answer('Done 👌')", "def remove(self, bot):\n\n self.bots.remove(bot)", "def delete_message(self, ts):\n return self(\"chat.delete\", ts=ts)", "def delete(bot, message_id, chat_id):\n\n bot.delete_message(chat_id, message_id)", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n 
text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "async def delete_bot_msg(self, channel):\n await channel.purge(limit=100, check=self.is_me)", "def deleteWord(self,chat_id, index):\n\t\tcommand = \"\"\"DELETE FROM words WHERE ID IN \n\t\t(SELECT words.ID FROM words JOIN courses ON words.course=courses.ID \n\t\t\tWHERE words.ID=? and courses.author_id=?);\"\"\"\n\t\tparams = (index, chat_id,)\n\n\t\tself._run_command(command, params)", "async def remove(self, context):\n try: \n url_tournament = Tournament.last_url_tournament\n await context.send(url_tournament)\n except Exception as error:\n print(error)", "async def deletetext(self, ctx, *, texte: str):\n config = await self.config.guild(ctx.guild).all()\n all_cookies = [config['Cookies'][c]['text'].lower() for c in config['Cookies']]\n dist = process.extractOne(texte.lower(), all_cookies, score_cutoff=70)\n emcolor = discord.Color.red()\n confirm, cancel = self.bot.get_emoji(812451214037221439), self.bot.get_emoji(812451214179434551)\n if dist:\n txt = dist[0]\n for cook in config['Cookies']:\n if config['Cookies'][cook]['text'].lower() == txt:\n cookie = config['Cookies'][cook]\n em = discord.Embed(title=\"Supprimer un fortune cookie\", description=box(cookie['text']),\n color=emcolor)\n seller = ctx.guild.get_member(cookie['author'])\n seller = str(seller) if seller else str(cookie['author'])\n em.set_footer(text=f\"Confirmez-vous la suppression de ce cookie de {seller} ?\")\n msg = await ctx.send(embed=em)\n\n start_adding_reactions(msg, [confirm, cancel])\n try:\n react, _ = await self.bot.wait_for(\"reaction_add\",\n check=lambda m,\n u: u == ctx.author and m.message.id == msg.id,\n timeout=30)\n except asyncio.TimeoutError:\n return await msg.clear_reactions()\n\n if react.emoji == confirm:\n await msg.clear_reactions()\n await self.config.guild(ctx.guild).Cookies.clear_raw(cook)\n em.set_footer(text=\"Le cookie a été supprimé avec succès\")\n \n async with self.config.guild(ctx.guild).reports() as reports:\n if cook in reports:\n reports.remove(cook)\n \n return await msg.edit(embed=em, mention_author=False)\n else:\n return await msg.delete()\n await ctx.send(\"**Introuvable** • Donnez une partie plus importante du texte du cookie pour que je puisse le trouver\")", "async def tod_remove(self, ctx, *args):\n if \"all\" in args:\n for user in self.players:\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await user.remove_roles(role)\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n await channel.delete()\n break\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"secret-voice\"):\n await channel.delete()\n break\n self.players = []\n message = \"All players removed from the game!\"\n await ctx.send(message)\n return\n\n for name in args:\n message = \"\"\n size = len(self.players)\n for user in self.players:\n if name == user.mention:\n self.players.remove(user)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await user.remove_roles(role)\n message = f\"{name} removed from the game!\"\n if size == len(self.players):\n message = \"Player not in the game! 
Check command syntax.\"\n await ctx.send(message)", "async def remove(self, ctx, index: int):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if not player.queue:\n return await ctx.send('Nothing is queued :no_entry:')\n if index > len(player.queue) or index < 1:\n return await ctx.send(\"Invalid song index :no_entry:\")\n index -= 1\n removed = player.queue.pop(index)\n\n await ctx.send(\"Removed **\" + removed.title + \"** from the queue <:done:403285928233402378>\")", "def delete(self, everyone=False):\n self.message.click()\n self.message.send_keys(Keys.ARROW_RIGHT)\n self.message.find_element_by_xpath(\"//div[@aria-label='Delete message']\").click()\n try:\n self.message.find_element_by_xpath('//div[@class=\"_1dwBj _3xWLK\"]').click()\n except:\n if not everyone:\n self.message.find_element_by_xpath('//*[@id=\"app\"]/div/span[2]/div/span/div/div/div/div/div/div[3]/div/div[1]/div').click()\n else:\n self.message.find_element_by_xpath('//*[@id=\"app\"]/div/span[2]/div/span/div/div/div/div/div/div[3]/div/div[3]/div').click()", "def delete(self, character):\n messages = [ message for message in get_messages(character) if message[MESSAGE].id != self.message.id ]\n character.db.mail = messages\n found = False\n for target in self.recipients:\n if self.has_message(target):\n found = True\n break\n if not found:\n self.message.delete()", "async def remove(message: discord.Message, opt: options):\n for q in db.data[\"questions\"]:\n if q[\"choices\"][0] == opt[0] and q[\"choices\"][1] == opt[1]:\n db.data[\"questions\"].remove(q)\n db.save()\n await client.say(message, \"**Entry removed.**\")\n break\n else:\n await client.say(message, \"**Could not find the question.**\")", "def unset(bot, update, chat_data):\n if 'job' not in chat_data:\n update.message.reply_text('Sem notificacoes ativadas')\n return\n\n job = chat_data['job']\n job.schedule_removal()\n del chat_data['job']\n check = emojize(\":white_check_mark:\", use_aliases=True)\n update.message.reply_text('Notificacao cancelada com sucesso'+check+'')", "def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()", "async def delete(self):\n return await self.set_message(text='')", "def remove(self, channel, nick, comment=\"\"):\n time.sleep(1)\n self.s.send(\"REMOVE %s %s%s\\n\" % (channel, nick, (comment and (\" :\" + comment))))\n logger.log(\"REMOVE %s %s%s\" % (channel, nick, (comment and (\" :\" + comment)))).LogSend()", "def rm(index):\n\n match = commands[index - 1]\n\n answer = click.confirm('Remove ' + Fore.MAGENTA + match['command'] + Fore.RESET, default=True)\n\n if answer:\n del commands[index - 1]\n\n with open(commandsJsonFile, 'w+') as outfile:\n json.dump(commands, outfile, indent=4)", "def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)", "def _callback_delete(self, chat_id, user_id, args, update):\n msg_id = update[\"callback_query\"][\"message\"][\"message_id\"]\n \n if len(args) == 3 and args[1] == str(user_id):\n if args[2] == 'all':\n self.db.delete_sprueche(args[1])\n self.tclient.edit_message_text('Alle gespeicherten Nasensprüche wurden gelöscht', chat_id, msg_id)\n elif args[2] == 'stop':\n self.tclient.edit_message_text('Löschvorgang beendet.', chat_id, msg_id)\n else:\n self.db.delete_spruch(args[1], args[2])\n keyboard = 
self.build_inline_keyboard_delete(user_id)\n if keyboard == None:\n self.tclient.edit_message_text('Alle gespeicherten Nasensprüche wurden gelöscht', chat_id, msg_id)\n else:\n self.tclient.edit_message_text('Nasenspruch wurde gelöscht.\\nMöchtest du weitere Sprüche löschen?'.format(args[2]), chat_id, msg_id, keyboard)", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n namesp = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "def delete(self, name):\n\n for i in self.bots:\n if i.name == name:\n i.exit()\n self.remove(i)\n i.cfg['enable'] = 0\n i.cfg.save()\n logging.debug('%s disabled' % i.name)\n return 1", "def delete_gkeeper(alist):\n\n res = [player for player in alist if player[2] != ['Por']]\n\n return res", "def delete_bot(self, team):\n if not len(self.factory.bots[team]):\n return self.OK_OBJ\n\n bot = self.factory.bots[team].pop(0)\n uid = bot.get_uid()\n self.hcriat.del_creature_by_uid(uid)\n if bot.loop.running:\n bot.loop.stop()\n\n if uid in self.factory.peers:\n # disconnect bot\n self.factory.peers[uid].transport.loseConnection()\n\n self.send_client(LogoutPlayer, broadcast=True, uid=uid)\n\n return self.OK_OBJ", "def delete_leader(self):" ]
[ "0.6861239", "0.671623", "0.66893005", "0.6594528", "0.6464064", "0.6448829", "0.6392518", "0.63728786", "0.6306389", "0.6304155", "0.62954575", "0.62544626", "0.61831826", "0.61374134", "0.6090278", "0.60877824", "0.6072825", "0.60493064", "0.6018521", "0.59921765", "0.5962545", "0.59246755", "0.59146416", "0.5914618", "0.590972", "0.59024554", "0.58949685", "0.589067", "0.5877459", "0.5846615" ]
0.6754195
1
Estimate sample Lmoments, based on Fortran code written for inclusion in IBM Research Report RC20525, 'FORTRAN ROUTINES FOR USE WITH THE METHOD OF LMOMENTS, VERSION 3' by J. R. M. Hosking, IBM Research Division, T. J. Watson Research Center, Yorktown Heights, NY 10598, Version 3 August 1996.
def _estimate_lmoments(values):
    # we need to have at least four values in order to make a sample L-moments estimation
    number_of_values = np.count_nonzero(~np.isnan(values))
    if number_of_values < 4:
        message = 'Insufficient number of values to perform sample L-moments estimation'
        _logger.warning(message)
        raise ValueError(message)

    # sort the values into ascending order
    values = np.sort(values)

    sums = np.zeros((3,))
    for i in range(1, number_of_values + 1):
        z = i
        term = values[i - 1]
        sums[0] = sums[0] + term
        for j in range(1, 3):
            z -= 1
            term = term * z
            sums[j] = sums[j] + term

    y = float(number_of_values)
    z = float(number_of_values)
    sums[0] = sums[0] / z
    for j in range(1, 3):
        y = y - 1.0
        z = z * y
        sums[j] = sums[j] / z

    k = 3
    p0 = -1.0
    for _ in range(2):
        ak = float(k)
        p0 = -p0
        p = p0
        temp = p * sums[0]
        for i in range(1, k):
            ai = i
            p = -p * (ak + ai - 1.0) * (ak - ai) / (ai * ai)
            temp = temp + (p * sums[i])
        sums[k - 1] = temp
        k = k - 1

    lmoments = np.zeros((3,))
    if sums[1] != 0:
        lmoments[0] = sums[0]
        lmoments[1] = sums[1]
        lmoments[2] = sums[2] / sums[1]

    return lmoments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_log_moment(self, num_examples=5040):\n q = tf.cast(num_examples, tf.float64) * 1.0 / self._total_examples\n mu_1, sigma_1 = 0, 4 # mean and standard deviation\n s_1 = np.random.normal(mu_1, sigma_1, 1000)\n mu_2, sigma_2 = 1, 4 # mean and standard deviation\n s_2 = np.random.normal(mu_2, sigma_2, 1000)\n s = (1-q)*s_1 + q*s_2\n moment_1 =[0]*len(self._log_moments)\n moment_2 = [0]*len(self._log_moments)\n log_moment = [0] * len(self._log_moments)\n for i in range(len(self._log_moments)):\n for j in range(len(s_1)):\n moment_1[i] += ((s_1[j]/s[j])**self._moment_orders[i])/len(s_1)\n moment_2[i] += ((s[j] / s_1[j]) ** self._moment_orders[i]) / len(s_1)\n for i in range(len(self._log_moments)):\n log_moment[i] = math.log(abs(max(moment_1[i],moment_2[i])))\n return log_moment", "def estimate_L(da):\n from statsmodels.tsa.stattools import acf\n \n def acf_lag1(x):\n if np.sum(~np.isnan(x)) == 0: # if all NaNs\n return np.nan\n else:\n x = x[~np.isnan(x)]\n return acf(x, nlags=1)[-1]\n \n n = len(da.time.values)\n \n # DataArray of lag1 ACF coefficients\n rho_da = xr.apply_ufunc(acf_lag1, da, input_core_dims=[['time']], output_core_dims=[[]], vectorize=True, dask='allowed')\n \n # DataArray of effective sample size\n n_eff_da = n * ((1 - rho_da) / (1 + rho_da))\n \n # Initialise guess for block length\n Ls_da = xr.full_like(rho_da, 1)\n for i in range(10): # iterate to get estimate of L\n L_da = (n - Ls_da + 1) ** ( (2/3) * (1 - n_eff_da / n) )\n Ls_da = L_da\n \n return np.ceil(L_da) # round up to get block length", "def _get_Mll(self):\n\t\t_M_ll = mll.get_mll(self.W_l, self.lmax)\n\t\treturn np.float64(_M_ll)", "def Ls(GTn:torch.tensor, Mn:torch.tensor) -> torch.tensor:\n return (-(GTn * torch.log(Mn+1e-15) + (1- GTn) * torch.log((1- Mn)+1e-15))).sum()", "def calculate_ll_normal_simple(data, variances):\n\n n_cells = data.shape[0]\n n_segments = data.shape[1]\n n_states = variances.shape[1]\n\n # Create states as (n_segments, n_cells, n_states) array\n states = np.tile(np.arange(0, n_states, 1), (n_cells, n_segments, 1))\n\n # Calculate mean\n mean = states\n \n # Normal dist log likelihood\n ll = (\n -0.5 * np.log(2. * np.pi)\n -0.5 * np.log(variances[:, np.newaxis, :])\n -1. * (np.square(data[:, :, np.newaxis] - mean) /\n (2. 
* variances[:, np.newaxis, :])))\n ll[np.isnan(data)] = 0.\n\n return ll", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def _update_latent_resp(data, smm_dof, posterior_nws_scale,\n posterior_nws_dof, log_smm_mixweight,\n log_det_precision, scatter):\n num_features = data.shape[1]\n\n latent_resp = (gammaln((num_features + smm_dof) / 2) - \n gammaln(smm_dof / 2) - \n (num_features / 2) * np.log(smm_dof * pi) + \n log_smm_mixweight + log_det_precision / 2 - \n ((num_features + smm_dof) / 2) * \n np.log(1 + \n (posterior_nws_dof / smm_dof).T * scatter.T + \n (num_features / \n (smm_dof * posterior_nws_scale)).T))\n\n latent_resp = normalize_logspace(latent_resp)\n return latent_resp", "def test_smoohted_LM(self):\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_smoothed_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_smoothed_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')\n\t\t# YOUR CODE HERE", "def lmoment(inlist,moment=1):\r\n if moment == 1:\r\n return 0.0\r\n else:\r\n mn = mean(inlist)\r\n n = len(inlist)\r\n s = 0\r\n for x in inlist:\r\n s = s + (x-mn)**moment\r\n return s/float(n)", "def msll(Y_true, Y_pred, V_pred, Y_train):\n mt, st = Y_train.mean(), Y_train.std()\n ll = norm.logpdf(Y_true, loc=Y_pred, scale=np.sqrt(V_pred))\n rand_ll = norm.logpdf(Y_true, loc=mt, scale=st)\n msll = - (ll - rand_ll).mean()\n return msll", "def evaluate_mc_sample_v1(grid, survey, pset, sample, opdir=\"Plots\"):\n t0 = time.process_time()\n\n nsamples = sample.shape[0]\n\n # get number of FRBs per sample\n Npersurvey = survey.NFRB\n # determines how many false surveys we have stats for\n Nsurveys = int(nsamples / Npersurvey)\n\n print(\n \"We can evaluate \",\n Nsurveys,\n \"MC surveys given a total of \",\n nsamples,\n \" and \",\n Npersurvey,\n \" FRBs in the original data\",\n )\n\n # makes a deep copy of the survey\n s = copy.deepcopy(survey)\n\n lls = []\n # Data order is DM,z,b,w,s\n # we loop through, artificially altering the survey with the composite values.\n for i in np.arange(Nsurveys):\n this_sample = sample[i * Npersurvey : (i + 1) * Npersurvey, :]\n s.DMEGs = this_sample[:, 0]\n s.Ss = this_sample[:, 4]\n if s.nD == 1: # DM, snr only\n ll = it.calc_likelihoods_1D(grid, s, pset, psnr=True, Pn=True, dolist=0)\n else:\n s.Zs = this_sample[:, 1]\n ll = it.calc_likelihoods_2D(grid, s, pset, psnr=True, Pn=True, dolist=0)\n lls.append(ll)\n t1 = time.process_time()\n dt = t1 - t0\n print(\"Finished after \", dt, \" seconds\")\n\n lls = np.array(lls)\n\n plt.figure()\n plt.hist(lls, 
bins=20)\n plt.xlabel(\"log likelihoods [log10]\")\n plt.ylabel(\"p(ll)\")\n plt.xticks(rotation=90)\n plt.tight_layout()\n plt.savefig(opdir + \"/ll_histogram.pdf\")\n plt.close()", "def generate_random_MT_Lune_samp():\n # 1. Get randomly varied Lune parameters (gamma and delta):\n # Define U rotation matrix (See Tape and Tape 2012/2013):\n U_rot_matrix = (1./np.sqrt(6))*np.vstack(([np.sqrt(3.),0.,-np.sqrt(3.)],[-1.,2.,-1.], [np.sqrt(2.),np.sqrt(2.),np.sqrt(2.)]))\n # Get a random sample 3-vector on a 3-unit sphere to use to calculate random delta and gamma Lune angles:\n delta = np.random.uniform(-np.pi/2., np.pi/2.) # theta, but shifted to range between -pi/2 and pi/2 (See Tape and Tape 2012/2013)\n beta = (np.pi/2.) - delta # Beta is simply phase shift of delta (See Tape and Tape 2012/2013)\n gamma = np.random.uniform(-np.pi/6., np.pi/6.) # phi, but shifted to range between -pi/6 and pi/6 (See Tape and Tape 2012/2013)\n # Get eigenvalues from delta,gamma,beta:\n lune_space_uvw_vec = np.vstack(([np.cos(gamma)*np.sin(beta)], [np.sin(gamma)*np.sin(beta)], [np.cos(beta)]))\n lambda_vec = np.dot(np.transpose(U_rot_matrix), lune_space_uvw_vec) # (See Tape and Tape 2012, eq. 20)\n Lune_space_MT = np.vstack(([lambda_vec[0],0.,0.],[0.,lambda_vec[1],0.], [0.,0.,lambda_vec[2]])) # MT with principle axes in u,v,w Lune space\n # 2. Get theta and phi angles to rotate Lune_space_MT by to randomly rotate into x,y,z space:\n # Get a random sample 3-vector on a 3-unit sphere to use to calculate random theta and phi rotation angles:\n a_unnormalised = np.array([np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0)], dtype=float) # Generate 3 indepdendent normal deviates\n a_normalised = a_unnormalised/(np.sum(a_unnormalised**2)**-0.5) # Normallise sample onto unit 3-sphere - As in Muller (1959)\n # And normallise so that vector magnitude = 1:\n a_normalised = a_normalised/((np.sum(a_normalised**2))**0.5)\n x = a_normalised[0]\n y = a_normalised[1]\n z = a_normalised[2]\n theta = np.arctan2(np.sqrt((x**2)+(y**2)),z)\n phi = np.arctan2(y,x)\n # 3. 
Rotate Lune_space_MT from u,v,w coords to x,y,z coords:\n random_MT = rot_mt_by_theta_phi(Lune_space_MT, theta, phi)\n random_six_MT = get_six_MT_from_full_MT_array(random_MT)\n # And normallise so that moment tensor magnitude = 1:\n random_six_MT_normalised = random_six_MT/((np.sum(random_six_MT**2))**0.5)\n # And set to correct dimensions (so matrix multiplication in forward model works correctly):\n random_six_MT_normalised = np.reshape(random_six_MT_normalised, (6, 1))\n return random_six_MT_normalised", "def mm_lrt_test(y, K):\n lm = LinearModel(y)\n lmm = LinearMixedModel(y)\n lmm.add_random_effect(K)\n lmm_res = lmm.get_ML()\n ll0 = lm.get_ll()\n ll1 = lmm_res['max_ll']\n D = 2 * (ll1 - ll0)\n pval = stats.chi2.sf(D, 1)\n return {'pval':pval, 'lrt_stat':D}", "def test_lfc_ml2():\n levels = np.array([1024.95703125, 1016.61474609, 1005.33056641, 991.08544922, 973.4163208,\n 951.3381958, 924.82836914, 898.25482178, 873.46124268, 848.69830322,\n 823.92553711, 788.49304199, 743.44580078, 700.50970459, 659.62017822,\n 620.70861816, 583.69421387, 548.49719238, 515.03826904, 483.24401855,\n 453.0418396, 424.36477661, 397.1505127, 371.33441162, 346.85922241,\n 323.66995239, 301.70935059, 280.92651367, 261.27053833, 242.69168091,\n 225.14237976, 208.57781982, 192.95333862, 178.22599792, 164.39630127,\n 151.54336548, 139.68635559, 128.74923706, 118.6588974, 109.35111237,\n 100.76405334, 92.84288025, 85.53556824, 78.79430389, 72.57549286,\n 66.83885193, 61.54678726, 56.66480637, 52.16108322]) * units.mbar\n temperatures = np.array([6.00750732, 5.14892578, 4.177948, 3.00268555, 1.55535889,\n -0.25527954, -1.93988037, -3.57766724, -4.40600586, -4.19238281,\n -3.71185303, -4.47943115, -6.81280518, -8.08685303, -8.41287231,\n -10.79302979, -14.13262939, -16.85784912, -19.51675415,\n -22.28689575, -24.99938965, -27.79664612, -30.90414429,\n -34.49435425, -38.438797, -42.27981567, -45.99230957,\n -49.75340271, -53.58230591, -57.30686951, -60.76026917,\n -63.92070007, -66.72470093, -68.97846985, -70.4264679,\n -71.16407776, -71.53797913, -71.64375305, -71.52735901,\n -71.53523254, -71.61097717, -71.92687988, -72.68682861,\n -74.129776, -76.02471924, -76.88977051, -76.26008606,\n -75.90351868, -76.15809631]) * units.celsius\n dewpoints = np.array([4.50012302, 3.42483997, 2.78102994, 2.24474645, 1.593485, -0.9440815,\n -3.8044982, -3.55629468, -9.7376976, -10.2950449, -9.67498302,\n -10.30486488, -8.70559597, -8.71669006, -12.66509628, -18.6697197,\n -23.00351334, -29.46240425, -36.82178497, -41.68824768, -44.50320816,\n -48.54426575, -52.50753403, -51.09564209, -48.92690659, -49.97380829,\n -51.57516098, -52.62096405, -54.24332809, -57.09109879, -60.5596199,\n -63.93486404, -67.07530212, -70.01263428, -72.9258728, -76.12271881,\n -79.49847412, -82.2350769, -83.91127014, -84.95665741, -85.61238861,\n -86.16391754, -86.7653656, -87.34436035, -87.87495422, -88.34281921,\n -88.74453735, -89.04680634, -89.26436615]) * units.celsius\n __, t_mixed, td_mixed = mixed_parcel(levels, temperatures, dewpoints)\n mixed_parcel_prof = parcel_profile(levels, t_mixed, td_mixed)\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints, mixed_parcel_prof, td_mixed)\n assert_almost_equal(lfc_pressure, 962.34 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 0.767 * units.degC, 2)", "def experiment_linear_l1(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. 
That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='l1',\n dual_norm_type='linf',\n baseline_norm_types=['l2'],\n attack_step_dir='grad_max')", "def get_phi_lam_obs(z, qlf, lLfrac_lam_obs_min, lLfrac_lam_obs_max, lam_eff_filter):\n\n #Start by getting the value of Lstar in units of 10^10 Lsun, which will be useful later on.\n Lstar = 10.**(qlf.log_Lstar(z))*qlf.Lstar_units\n Lstar_10 = (Lstar/(1e10*L_sun)).to(1.).value\n\n #Set the grid in bolometric L/Lstar.\n lLfrac_min = -3.0\n lLfrac_max = 3.0 #10.0\n dlLfrac = 0.01\n lLfrac = np.arange(lLfrac_min,lLfrac_max,dlLfrac)\n Lfrac = 10.**lLfrac\n\n #Get the bolometric QLF evaluated in the grid of Lfrac.\n phi_bol = qlf.phi_bol_Lfrac(Lfrac, z)\n\n #Transform the bolometric QLF to the intrinsic luminosity QLF in the band. We assume that the bolometric correction in all bands of interest is proportional to the one in the B-band, as is done in the Hopkins07 provided code.\n phi_lam = phi_bol*jacobian(Lfrac, Lstar_10, qlf)\n Lfrac_lam = get_Lfrac_lam(Lfrac, Lstar_10, qlf)\n lLfrac_lam = np.log10(Lfrac_lam)\n #dlLfrac_lam = dlLfrac/jacobian(Lfrac, Lstar_10, qlf)\n\n #Since there is a natural dispersion to the bolometric corrections, we convolve phi_lam with the uncertainty function to take it into account.\n phi_lam_2D = np.tile(phi_lam, (len(phi_lam), 1))\n sigma = qlf.get_sigma(Lfrac, Lstar_10, lam_eff_filter/(1.+z))\n lLfrac_lam_sig = lLfrac_lam\n sigma_2D = np.tile(sigma, (len(sigma), 1))\n lLfrac_lam_2D = np.tile(lLfrac_lam, (len(lLfrac_lam), 1))\n lLfrac_lam_sig_2D = np.tile(lLfrac_lam_sig, (len(lLfrac_lam), 1)).T\n\n p = (2.*np.pi)**-0.5 * sigma_2D**-1 * np.exp( -0.5*( (lLfrac_lam_sig_2D - lLfrac_lam_2D)/sigma_2D)**2)\n\n phi_lam_sig = np.sum(phi_lam_2D*p * dlLfrac, axis=1)\n\n #The next step is to convolve with the obscuration function. The issue here is that the observed luminosity in the band is a function of the intrinsic luminosity and the obscuration.\n lNH_min = 20.\n lNH_max = 26.\n dlNH = 0.01\n lNH = np.arange(lNH_min, lNH_max, dlNH)\n\n #Following the approach of the Shen20 pubtools, we will now calculate phi_lam_obs for the same luminosity fractions for which we have phi_lam.\n lLfrac_lam_obs_grid = lLfrac_lam_sig\n\n #Determine the obscuration function in the observed band.\n ltheta_fact = 0.4*qlf.dgr(z).to(u.cm**2).value*1e22 * qlf.xi(lam_eff_filter/(1.+z))\n ltheta = 10.**(lNH-22) * ltheta_fact\n ltheta_2D = np.tile(ltheta, [len(lLfrac_lam_obs_grid), 1])\n\n #For each NH, we will need to evaluate the unreddened QLF at a luminosity of lLfrac_lam_obs_grid + ltheta. So let's build it as a 2D array in which each row has the same lLfrac_lam_obs_grid value modified by the reddening correction (i.e., unreddened assuming different levels of obscuration).\n lLfrac_lam_sig_eval_2D = np.tile(lLfrac_lam_obs_grid, [len(lNH), 1]).T + ltheta_2D\n\n #Now, evaluate the f_NH function, following the S20 pubtools. Note: I think this actually wrong. f_NH should be evaluated at the intrinsic luminosity fraction of the reddening corrected luminosity. 
Here, we just assume that the same intrinsic lLfrac corresponds to the observed lLfrac_lam_obs_grid value for all NHs.\n lLfrac_eval_2D = np.tile(lLfrac, [len(lNH),1]).T\n log_NH_2D = np.tile(lNH, [len(lLfrac_lam_obs_grid), 1])\n f_NH = qlf.fNH(log_NH_2D, lLfrac_eval_2D, Lstar_10, z)\n\n #Extrapolate phi_lam_sig so that we can evaluate it in the new positions.\n log_phi_lam_sig_interp = interp1d(lLfrac_lam_sig, np.log10(phi_lam_sig.value), kind='linear', fill_value = 'extrapolate')\n\n #Evaluate it an produce phi_lam_obs_grid by integrating over f_NH dlNH.\n phi_lam_sig_eval_2D = 10.**(log_phi_lam_sig_interp(lLfrac_lam_sig_eval_2D))\n phi_lam_obs_grid= np.sum(phi_lam_sig_eval_2D * f_NH * dlNH, axis=1)\n\n #Now, this is the output grid we actually want.\n nlLfrac_lam_obs = 100\n dlLfrac_lam_obs = (lLfrac_lam_obs_max-lLfrac_lam_obs_min)/nlLfrac_lam_obs\n if dlLfrac_lam_obs > 0.1:\n dlLfrac_lam_obs = 0.1\n lLfrac_lam_obs = np.arange(lLfrac_lam_obs_min, lLfrac_lam_obs_max + 0.1*dlLfrac_lam_obs, dlLfrac_lam_obs)\n\n #Interpolate/extrapolate phi_lam_obs to put it in the required output grid and return the resulting QLF.\n lphi_lam_obs_interp = interp1d(lLfrac_lam_obs_grid, np.log10(phi_lam_obs_grid), fill_value='extrapolate')\n phi_lam_obs = 10.**(lphi_lam_obs_interp(lLfrac_lam_obs))*phi_lam_sig.unit\n return phi_lam_obs, dlLfrac_lam_obs*u.dex", "def NLL(sample, params):\n mu = params[:,:,0]\n logsigma = params[:,:,1]\n \n c = normalization.to(mu.device)\n inv_sigma = torch.exp(-logsigma)\n tmp = (sample - mu) * inv_sigma\n return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))", "def calc_Lr(rho,mld,f,g=9.8,po=1027.):\n n2ml=np.ndarray(len(rho[1,:-1]))\n for i in range(len(rho[1,:-1])):\n n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])\n Lr=(np.sqrt(n2ml)*mld[:-1])/f\n\n return Lr", "def lam(E):\n return (12398.4/E)*1e-10", "def marginal_ln_likelihood(samples, prior, data):\n n_samples = len(samples)\n n_linear = len(prior._linear_equiv_units)\n mu = np.zeros(n_linear)\n\n marg_ll = np.zeros(n_samples)\n for n, M, Lambda, ivar, *_ in get_M_Lambda_ivar(samples, prior, data):\n try:\n marg_ll[n], *_ = likelihood_worker(data.rv.value, ivar, M,\n mu, np.diag(Lambda),\n make_aA=False)\n except np.linalg.LinAlgError as e:\n raise e\n\n return marg_ll", "def univariate_dlm_simulation(F,G,W,v,initial_state,n,T):\n \n ZEROS = np.zeros(n)\n \n emissions = np.zeros([T,1])\n state = np.zeros([T,n])\n \n state[0] = initial_state\n emissions[0] = F.dot(initial_state) + np.random.normal(loc = 0.0,scale = v)\n \n for t in range(T):\n state[t] = G.dot(state[t-1]) + np.random.multivariate_normal(ZEROS,W)\n emissions[t] = F.dot(state[t]) + np.random.normal(0.0, v)\n \n return state,emissions", "def getLML(self):\n assert self.init, 'VarianceDecomposition:: GP not initialised'\n return self.gp.LML()", "def _LL(state, effects, observed_frequencies) -> float:\n observed_frequencies = np.array(observed_frequencies)\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n return sum(np.log10(predicted_probs) * observed_frequencies)", "def test_MAR_est_LWR():\r\n\r\n # This is the same processes as those in doc/examples/ar_est_2vars.py:\r\n a1 = np.array([[0.9, 0],\r\n [0.16, 0.8]])\r\n\r\n a2 = np.array([[-0.5, 0],\r\n [-0.2, -0.5]])\r\n\r\n am = np.array([-a1, -a2])\r\n\r\n x_var = 1\r\n y_var = 0.7\r\n xy_cov = 0.4\r\n cov = np.array([[x_var, xy_cov],\r\n [xy_cov, y_var]])\r\n\r\n n_freqs = 1024\r\n w, Hw = tsa.transfer_function_xy(am, n_freqs=n_freqs)\r\n Sw = 
tsa.spectral_matrix_xy(Hw, cov)\r\n\r\n # This many realizations of the process:\r\n N = 500\r\n # Each one this long\r\n L = 1024\r\n\r\n order = am.shape[0]\r\n n_lags = order + 1\r\n\r\n n_process = am.shape[-1]\r\n\r\n z = np.empty((N, n_process, L))\r\n nz = np.empty((N, n_process, L))\r\n\r\n for i in range(N):\r\n z[i], nz[i] = utils.generate_mar(am, cov, L)\r\n\r\n a_est = []\r\n cov_est = []\r\n\r\n # This loop runs MAR_est_LWR:\r\n for i in range(N):\r\n Rxx = (tsa.MAR_est_LWR(z[i], order=n_lags))\r\n a_est.append(Rxx[0])\r\n cov_est.append(Rxx[1])\r\n\r\n a_est = np.mean(a_est, 0)\r\n cov_est = np.mean(cov_est, 0)\r\n\r\n # This tests transfer_function_xy and spectral_matrix_xy:\r\n w, Hw_est = tsa.transfer_function_xy(a_est, n_freqs=n_freqs)\r\n Sw_est = tsa.spectral_matrix_xy(Hw_est, cov_est)\r\n\r\n # coherence_from_spectral:\r\n c = tsa.coherence_from_spectral(Sw)\r\n c_est = tsa.coherence_from_spectral(Sw_est)\r\n\r\n # granger_causality_xy:\r\n\r\n w, f_x2y, f_y2x, f_xy, Sw = tsa.granger_causality_xy(am,\r\n cov,\r\n n_freqs=n_freqs)\r\n\r\n w, f_x2y_est, f_y2x_est, f_xy_est, Sw_est = tsa.granger_causality_xy(a_est,\r\n cov_est,\r\n n_freqs=n_freqs)\r\n\r\n # interdependence_xy\r\n i_xy = tsa.interdependence_xy(Sw)\r\n i_xy_est = tsa.interdependence_xy(Sw_est)\r\n\r\n # This is all very approximate:\r\n npt.assert_almost_equal(Hw, Hw_est, decimal=1)\r\n npt.assert_almost_equal(Sw, Sw_est, decimal=1)\r\n npt.assert_almost_equal(c, c_est, 1)\r\n npt.assert_almost_equal(f_xy, f_xy_est, 1)\r\n npt.assert_almost_equal(f_x2y, f_x2y_est, 1)\r\n npt.assert_almost_equal(f_y2x, f_y2x_est, 1)\r\n npt.assert_almost_equal(i_xy, i_xy_est, 1)", "def HybridMC(L, beta, h, n, Nsteps, metropolize=False, getMags=False):\n xy = XYmodel.XYmodel(L, beta)\n\n if getMags:\n mag = np.zeros(Nsteps+1)\n mag[0] = xy.cosMagnetVector()\n\n if metropolize:\n rejected = 0\n\n\n # Do Nsteps of MCMC.\n for k in range(1, Nsteps + 1):\n\n yt_old = np.random.randn(L) # \\tilde{Y}^{(k-1)} drawn from exp(-K(x))\n\n # Run the Hamiltonian dynamics using velocity verlet for n steps.\n [yh, yt] = velocityVerlet(xy, xy.theta, yt_old, h, n)\n\n if metropolize:\n # Compute the acceptance probability.\n if np.log(np.random.rand()) < logp_acc(xy, xy.theta, yt_old, yh, yt):\n # Note that we do not need to reset the momentum variables\n # because we sample them at each step independently.\n xy.set(yh)\n else:\n rejected += 1\n else:\n xy.set(yh)\n\n if getMags:\n mag[k] = xy.cosMagnetVector()\n\n if metropolize:\n rej_rate = float(rejected)/Nsteps\n\n if getMags and metropolize:\n return [xy, rej_rate, mag]\n elif getMags and not metropolize:\n return [xy, mag]\n elif not getMags and metropolize:\n return [xy, rej_rate]\n else:\n return xy", "def mme_geo(samples, moment=1):\n samples = samples ** moment\n k = len(samples)\n return ( k / np.sum(samples))", "def estimate_implicit_moments(config, shared, task_id, hnet, hhnet, num_samples,\n device):\n theta = None\n if hhnet is not None:\n theta = hhnet.forward(cond_id=task_id)\n\n samples = torch.empty((num_samples, hnet.num_outputs)).to(device)\n\n for j in range(num_samples):\n z = torch.normal(torch.zeros(1, shared.noise_dim), config.latent_std).\\\n to(device)\n\n weights = hnet.forward(uncond_input=z, weights=theta)\n\n samples[j, :] = torch.cat([p.detach().flatten() for p in weights])\n\n sample_mean = samples.mean(dim=0)\n sample_std = samples.std(dim=0)\n\n return sample_mean, sample_std", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * 
np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]", "def modelMoments(lm, theta, line=0, order=-1):\n noise, center, scale, herm = lm.unpackTheta(theta)\n\n # Since the Probablist's Hermite functions are orthogonal given the unit normal,\n # to integrate the mean and variance just get the weights multiplied by x.\n hermx = hermemulx(herm[line][0:1])\n hermxx = hermemulx(hermx)\n\n normFactor = np.sqrt(2*np.pi)*scale[line]\n m0 = normFactor*herm[line][0]\n m1 = (center[line] - lm.linesLam[line])*m0 + normFactor*hermx[0]*scale[line]\n m2 = normFactor*hermxx[0]*scale[line]**2\n\n return np.array([m0, m1*1e3, m2*1e6])", "def experiment4(L=10):\n def apw_fit(sigma, mu, Ne):\n code = sample_code(L, sigma)\n def apw_phat(site):\n ep = score(code, site)\n return 1/(1+exp(ep-mu))**(Ne-1)\n chain = mh(lambda s:apw_phat(s), proposal=mutate_site, x0=random_site(L),\n capture_state = lambda s:apw_occ(code, mu, s))[25000:]\n return mean(chain)\n def linear_fit(sigma, mu, Ne):\n pssm = sample_matrix(L, sigma)\n def linear_phat(site):\n ep = score_seq(pssm, site)\n return 1/(1+exp(ep-mu))**(Ne-1)\n chain = mh(lambda s:linear_phat(s), proposal=mutate_site, x0=random_site(L),\n capture_state = lambda s:linear_occ(pssm, mu, s))[25000:]\n return mean(chain)\n def apw_occ(code, mu, site):\n ep = score(code, site)\n return 1/(1+exp(ep-mu))\n def linear_occ(pssm, mu, site):\n ep = score_seq(pssm, site)\n return 1/(1+exp(ep-mu))\n sigmas = np.linspace(0,5,5)\n mus = np.linspace(-10,10,5)\n Nes = np.linspace(0,5,5)\n apws = [apw_fit(sigma, mu, Ne) for sigma in tqdm(sigmas) for mu in mus for Ne in Nes]\n linears = [linear_fit(sigma, mu, Ne) for sigma in tqdm(sigmas) for mu in mus for Ne in Nes]" ]
[ "0.5974585", "0.5965852", "0.59529734", "0.5871154", "0.5866207", "0.5765351", "0.57264125", "0.5722149", "0.5719706", "0.5702045", "0.5696745", "0.56964016", "0.56698036", "0.56648046", "0.5660657", "0.5654306", "0.56494606", "0.56422335", "0.5630442", "0.5622559", "0.5617974", "0.5612292", "0.5611061", "0.5600613", "0.55601823", "0.55396277", "0.55334586", "0.55260116", "0.5521293", "0.5516395" ]
0.75961566
0
Universal function (ufunc) used to perform fitting of a value to a Pearson Type III distribution as described by the Pearson Type III parameters and probability of zero arguments.
def _pearson_fit_ufunc(value_to_fit, pearson_param_1, pearson_param_2, pearson_param_3, probability_of_zero):
    fitted_value = np.NaN

    # only fit to the distribution if the value is valid/not missing
    if not math.isnan(value_to_fit):

        # get the Pearson Type III cumulative density function value
        pe3_cdf = 0.0

        #TODO questions for Trevor/Richard/Deke -- what is the significance of the value 0.0005 below?
        # Is this a trace precip value or a floor probability value, etc.?

        # handle trace amounts as a special case
        if value_to_fit < 0.0005:
            if probability_of_zero > 0.0:
                pe3_cdf = 0.0
            else:
                pe3_cdf = 0.0005  # minimum probability
        else:
            # calculate the CDF value corresponding to the value
            pe3_cdf = _pearson3cdf(value_to_fit, [pearson_param_1, pearson_param_2, pearson_param_3])

        if not math.isnan(pe3_cdf):
            # calculate the probability value, clipped between 0 and 1
            probability_value = np.clip((probability_of_zero + ((1.0 - probability_of_zero) * pe3_cdf)), 0.0, 1.0)

            # the values we'll return are the values at which the probabilities of a normal distribution are
            # less than or equal to the computed probabilities, as determined by the normal distribution's
            # quantile (or inverse cumulative distribution) function
            fitted_value = scipy.stats.norm.ppf(probability_value)

    return fitted_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fu_fit(fu, v):\n popt, pcov = curve_fit(\n lambda v, a : beta.cdf(v, a, 1),\n v, fu,\n p0 = (1,)\n )\n chi2 = np.sum((beta.cdf(v, *popt, 1) - fu)**2 / (len(v)-2))\n res = np.zeros(len(popt)+1)\n res[0:1] = popt\n res[1] = math.sqrt(chi2)\n return res", "def scalar():\n # noinspection PyUnusedLocal\n def sf(x, params, const_list, const_dict):\n a = params[0]\n return a\n return FitFunction(func=sf, num_fit_params=1, name='scalar', code='s')", "def qfFunction(f, x, N):\r\n return ssstats.binom.ppf(x, N, f)", "def func1(x,u):\r\n return 5*x*u+(x+7)*np.sin(x)", "def _ucb(x, gp, kappa):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mean, std = gp.predict(x, return_std=True)\n\n return mean + kappa * std", "def peturb(param):\n ann = param.annotation\n if ann == inspect._empty:\n ann = 'normal'\n if type(ann)==str:\n if ann == 'normal':\n return param.default + np.random.normal()\n elif ann == 'positive':\n return abs(param.default + np.random.normal())\n elif type(ann) == tuple:\n # Get a number from uniform random distribution\n # bounded by values in the annotation tuple.\n if type(ann[0]) == float:\n return np.random.uniform(*ann)\n elif type(ann[0]) == int:\n return np.random.randint(*ann)\n else:\n print('Unrecognised function annotation.')", "def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...", "def eval_f(self, u, t):\n\n f = self.dtype_f(self.init)\n f[:] = -1.0 * self.A.dot(u)\n return f", "def autofit(fit_func, x, y, xp, yp):\n\n a = (y[-1] - y[0]) / (x[-1] - x[0])\n b = y[0] - a * x[0]\n eta = 0.5\n a1 = a2 = yp - linear(xp, a, b)\n x0 = xp\n mu = xp\n gamma = estimate_fwhm(x, y - linear(x, a, b), yp - linear(xp, a, b)) / 2\n sigma = 0.8493 * gamma\n\n p0 = [a, b, eta, a1, x0, gamma, a2, mu, sigma]\n\n # print(\"p0 =\", p0)\n\n # in addition to the mathematical bounds, we add bounds for x0 and mu which should lie very close to xp\n # since otherwise curve_fit will try to send one to infinity if the respective distribution has\n # low influence on the peak which would be unphysical\n lower_bounds = [-np.inf, -np.inf, 0.0, 0.0, xp - 30, 0.0, 0.0, xp - 30, 0.0]\n upper_bounds = [np.inf, np.inf, 1.0, np.inf, xp + 30, np.inf, np.inf, xp + 30, np.inf]\n bounds = lower_bounds, upper_bounds\n\n popt, pcov = curve_fit(fit_func, x, y, p0=p0, bounds=bounds, maxfev=10000)\n\n return popt, pcov", "def fitfunc_3G1Ysq(a_x, a_p1, a_p2, a_p3, a_p4, a_p5, a_p6, a_p7, a_p8, a_p9):\n \n# if (a_x != 0.0):\n if (True):\n return (a_p1 * exp(-pow(a_x/a_p2, 2)) + a_p3 * exp(-pow(a_x/a_p4, 2)) +\n a_p5 * exp(-pow(a_x/a_p6, 2)) +\n a_p7 * (1.0 - exp(-a_p8 * a_x**2))**2 * exp(-2.0 * a_p9 * a_x) / a_x**2)\n else:\n return (a_p1 * exp(-pow(a_x/a_p2, 2)) + a_p3 * exp(-pow(a_x/a_p4, 2)) +\n a_p5 * exp(-pow(a_x/a_p6, 2)))", "def _beam_fit_fn_3(z, z0, Theta):\n return (Theta*(z-z0))**2", "def curvefit(self, series):\n\t\tparameters = johnsonsu.fit(series)\n\t\tfunc = johnsonsu(*parameters)\n\t\tD, p = kstest(series, 'johnsonsu', N = len(series), args=parameters)\n\t\tif p < 0.1 or func.mean() < 0.5:\n\t\t\treturn None\n\t\treturn func.mean() - func.std() / 2", "def __call__(self, u, t):\n S, I, R, V = u\n return [-self.beta(t)*S*I-self.p(t)*S,\n self.beta(t)*S*I-self.nu(t)*I,self.nu(t)*I,\n self.p(t)*S]", "def u_exact(self, t):\n\n me = self.dtype_u(self.init)\n if self.params.freq >= 0:\n xvalues = np.array([i * self.dx for i in range(self.params.nvars)])\n me[:] = np.sin(np.pi * self.params.freq * (xvalues - self.params.c * t))\n else:\n np.random.seed(1)\n 
me[:] = np.random.rand(self.params.nvars)\n return me", "def __call__(self, u, t):\n S, I, R = u\n return [-self.beta(t)*S*I, self.beta(t)*S*I - self.nu(t)*I, self.nu(t)*I]", "def F(self, t, x, **params):\n return 0.*x", "def multivariate_gauss_prob(observed, mean, covariance):\n\n return None", "def x_to_u(self, x):\n return stats.norm.ppf(self.CDF(x))", "def prelu(input, weight):\n return FunctionLib.apply('PRelu', input.device, [input, weight])", "def fitfunc_1G1Y(a_x, a_p1, a_p2, a_p3, a_p4, a_p5):\n \n# if (a_x != 0.0):\n if (True):\n return (a_p1 * exp(-pow(a_x/a_p2, 2)) + a_p3 * (1.0 - exp(-a_p4 * a_x**2)) * exp(-a_p5 * a_x) / a_x)\n else:\n return a_p1", "def f_UPPS_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K):\n W_T = f_W_T_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K)\n value = pow(W_T, -gamma) * f_W_T_to_P_T_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K) * f_P_T_to_P_0(v, r_f, d, s, T)\n return value", "def f_U_function(W, gamma):\n if W > 0:\n if gamma == 1:\n utility = math.log(W)\n elif gamma >= 0:\n # Minus 1 or not.\n utility = math.pow(W, 1 - gamma) / (1 - gamma)\n else:\n print('The risk aversion parameter should be non-negative numbers.')\n else:\n print('The wealth should be non-negative. Now {}.'.format(W))\n utility = 0\n return utility", "def objective(data: VLEPoints, params: typing.List[float]) -> float:\n error = 0\n mixture = Mixture(\n name=\"\",\n first_component=data.components[0],\n second_component=data.components[1],\n uniquac_params=UNIQUACParameters.from_array(params),\n )\n for point in data:\n error += (get_partial_pressures(temperature=point.temperature,\n composition=point.composition,\n mixture=mixture,\n calculation_type=\"UNIQUAC\",\n )[0] - point.pressures[0]) ** 2 \\\n + (get_partial_pressures(temperature=point.temperature,\n composition=point.composition,\n mixture=mixture,\n calculation_type=\"UNIQUAC\",\n )[1] - point.pressures[1]) ** 2\n return numpy.sqrt(error / len(data))", "def kind_utility_func(x):\n mean = [0.7, 0.3]\n s_1 = 0.3\n s_2 = 0.2\n r_12 = 0.0\n cov = [[s_1**2, r_12*s_1*s_2], \n [r_12*s_1*s_2, s_2**2]]\n rv = multivariate_normal(mean, cov)\n A = 1/rv.pdf(mean)\n return A*rv.pdf(x)", "def p_V_fit(guesses, volume_arr, pressure_arr, N, T):\r\n\tdef inverse_function(x, b):\r\n\t\treturn N*k_B*T/(x-N*b)\r\n\r\n\tfit \t = curve_fit(inverse_function, volume_arr, pressure_arr, p0 = guesses)\r\n\t#print('The pressure fit parameters are:', fit[0])\r\n\tdata_fit = inverse_function(volume_arr, *fit[0])\r\n\treturn data_fit, fit[0], np.sqrt(np.diag(fit[1]))", "def test_fit_default_distribution(self):\n\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert copula.univariates[i].__class__ == GaussianUnivariate\n assert copula.univariates[i]._params['loc'] == self.data[key].mean()\n assert copula.univariates[i]._params['scale'] == np.std(self.data[key])\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()", "def generic_distribution(target, seeds, func):\n seeds = target[seeds]\n value = func.ppf(seeds)\n return value", "def percentual_bias(sim, obs, dim=\"time\"):\n # wrap numpy function\n kwargs = dict(\n input_core_dims=[[dim], [dim]], dask=\"parallelized\", output_dtypes=[float]\n )\n pbias = xr.apply_ufunc(_pbias, sim, obs, **kwargs)\n pbias.name = \"pbias\"\n return pbias", "def fit(x, a, p, b):\n return a * (p ** x) + b", "def f(p, U_ij, 
gamma, idens, ixmom, iymom, iener):\n\n D = U_ij[idens]\n tau = U_ij[iener]\n\n if abs(tau+p) < 1.e-6:\n u = U_ij[ixmom]\n v = U_ij[iymom]\n else:\n u = U_ij[ixmom] / (tau + p + D)\n v = U_ij[iymom] / (tau + p + D)\n\n # Lorentz factor\n W = 1.0 / np.sqrt(1.0 - u**2 - v**2)\n\n return (gamma - 1.0) * (tau + D*(1.0-W) + p*(1.0-W**2)) / W**2 - p" ]
[ "0.58594096", "0.57445276", "0.5513431", "0.5453187", "0.5377383", "0.5373627", "0.53578883", "0.53555936", "0.5353605", "0.5349096", "0.5334888", "0.5317195", "0.53059936", "0.52796435", "0.5270886", "0.52526087", "0.5245205", "0.5244957", "0.5240639", "0.523926", "0.52364886", "0.5233672", "0.52196205", "0.5195041", "0.51815516", "0.5177499", "0.5174115", "0.51700646", "0.5159614", "0.5151703" ]
0.77840495
0
Test the __init__ method when parameter 'mac' is None. Should raise an AttributeError.
def test_init_no_mac(self): # mocks of files rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3) with self.assertRaises(ValueError): ap = APInfo(port_id=1, ip="2.2.2.2", mac=None, radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1', gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_no_mac(self):\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip=\"3.3.3.3\", ap_info=self.ap)", "def test_init_valid(self):\n payload = payloads.MACResponsePayload(\n self.unique_identifier,\n self.mac_data)\n self.assertEqual(payload.unique_identifier, self.unique_identifier)\n self.assertEqual(payload.mac_data, self.mac_data)", "def test_init_valid(self):\n payload = payloads.MACRequestPayload(\n self.unique_identifier,\n self.cryptographic_parameters,\n self.data)\n self.assertEqual(payload.unique_identifier, self.unique_identifier)\n self.assertEqual(payload.cryptographic_parameters,\n self.cryptographic_parameters)\n self.assertEqual(payload.data, self.data)", "def test__init__raise_exception(self):\n self.assertRaises(TypeError, MasterNodeInterface)", "def __init__(__self__, *,\n mac_address: str,\n name: str):\n pulumi.set(__self__, \"mac_address\", mac_address)\n pulumi.set(__self__, \"name\", name)", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with self.assertRaises(TypeError):\n knxipframe.init(23)", "def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)", "def test_init_wrong_ap_type(self):\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\",\n ip=\"3.3.3.3\", ap_info=ap_wrong)", "def test_read_no_mac_data(self):\n payload = payloads.MACResponsePayload()\n args = (self.encoding_no_mac_data,)\n self.assertRaisesRegex(\n exceptions.InvalidKmipEncoding,\n \"expected mac response mac data not found\",\n payload.read,\n *args\n )", "def __init__(self, mac):\n self.go_on = True\n self.mac = mac", "def test_init_attributes(self):\n t = self.Test({'id': 1, 'poop': 'abc'})\n\n self.assertEqual(t.id, 1)\n self.assertEqual(t.name, None)\n self.assertRaises(AttributeError, t.__getattribute__, 'poop')", "def test_init():\n machine = Machine(['a', 'b', 'c', '_'])\n assert machine.alphabet == ['a', 'b', 'c', '_']\n assert machine.head is None\n assert machine.state is None\n assert machine.tape is None", "def test_init_no_ip(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)", "def test_read_no_data(self):\n payload = payloads.MACRequestPayload()\n args = (self.encoding_no_data,)\n self.assertRaisesRegex(\n exceptions.InvalidKmipEncoding,\n \"expected mac request data not found\",\n payload.read,\n *args\n )", "def __init__(__self__, *,\n crashed: Optional[pulumi.Input[bool]] = None,\n device_out_of_memory: Optional[pulumi.Input[bool]] = None,\n failed_roboscript: Optional[pulumi.Input[bool]] = None,\n not_installed: Optional[pulumi.Input[bool]] = None,\n other_native_crash: Optional[pulumi.Input[bool]] = None,\n timed_out: Optional[pulumi.Input[bool]] = None,\n unable_to_crawl: Optional[pulumi.Input[bool]] = None):\n if crashed is not None:\n pulumi.set(__self__, \"crashed\", crashed)\n if device_out_of_memory is not None:\n pulumi.set(__self__, \"device_out_of_memory\", device_out_of_memory)\n if failed_roboscript is not None:\n pulumi.set(__self__, \"failed_roboscript\", failed_roboscript)\n if not_installed is not 
None:\n pulumi.set(__self__, \"not_installed\", not_installed)\n if other_native_crash is not None:\n pulumi.set(__self__, \"other_native_crash\", other_native_crash)\n if timed_out is not None:\n pulumi.set(__self__, \"timed_out\", timed_out)\n if unable_to_crawl is not None:\n pulumi.set(__self__, \"unable_to_crawl\", unable_to_crawl)", "def test_register_nonexisting_attr(self):\n pass", "def test_init_arg(self):\n b1 = BaseModel(12)\n self.assertEqual(type(b1).__name__, \"BaseModel\")\n self.assertFalse(hasattr(b1, \"12\"))", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')", "def __init__(self):\n raise NoInitiation", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with pytest.raises(AttributeError):\n knxipframe.init(23)\n\n with pytest.raises(CouldNotParseKNXIP):\n # this is not yet implemented in xknx\n knxipframe.init(KNXIPServiceType.SEARCH_REQUEST_EXTENDED)", "def test_write_with_no_data(self):\n stream = utils.BytearrayStream()\n payload = payloads.MACRequestPayload(\n self.unique_identifier,\n self.cryptographic_parameters,\n None)\n args = (stream,)\n self.assertRaisesRegex(\n exceptions.InvalidField,\n \"The mac request data is required\",\n payload.write,\n *args\n )", "def test_001_init(self):\n self.assertRaises(TypeError,ionchrom.ionchrom)", "def _check_init(self):\n # instantiate the guest class we want to test\n system_name = 'dummy_system'\n host_name = 'dummy.domain.com'\n user = 'root'\n passwd = 'somepwd'\n extensions = {}\n guest_obj = linux.GuestLinux(\n system_name, host_name, user, passwd, extensions)\n\n # validate if attributes were correctly assigned to object\n self.assertEqual('linux', guest_obj.GUEST_ID)\n self.assertIs(system_name, guest_obj.name)\n self.assertIs(host_name, guest_obj.host_name)\n self.assertIs(user, guest_obj.user)\n self.assertIs(passwd, guest_obj.passwd)\n self.assertIs(extensions, guest_obj.extensions)\n\n # return object for further testing\n return guest_obj", "def test_init_correct(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(ap.ip, '2.2.2.2')", "def test_constructor_invalid():\n with pytest.raises(TypeError, match='missing 1 required positional argument'):\n PseudoPotentialData() # pylint: disable=no-value-for-parameter", "def test_prepare_mac_header(self):\n self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)\n self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)\n self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)", "def test_raises_when_accessing_none_implementation(self):\n\n class APIObj(\n platform.APIObject,\n collections.namedtuple(\"APIObj\", \"implementation\"),\n ):\n def __new__(cls):\n return super().__new__(cls, implementation=None)\n\n obj = APIObj()\n\n with pytest.raises(AttributeError) as exc_info:\n obj.implementation # pylint: disable=pointless-statement\n\n assert \"invalid access to 'implementation': not initialized\" in str(\n exc_info.value\n )", "def __init__(__self__, *,\n crashed: bool,\n device_out_of_memory: bool,\n failed_roboscript: bool,\n not_installed: bool,\n other_native_crash: bool,\n timed_out: bool,\n unable_to_crawl: bool):\n 
pulumi.set(__self__, \"crashed\", crashed)\n pulumi.set(__self__, \"device_out_of_memory\", device_out_of_memory)\n pulumi.set(__self__, \"failed_roboscript\", failed_roboscript)\n pulumi.set(__self__, \"not_installed\", not_installed)\n pulumi.set(__self__, \"other_native_crash\", other_native_crash)\n pulumi.set(__self__, \"timed_out\", timed_out)\n pulumi.set(__self__, \"unable_to_crawl\", unable_to_crawl)", "def test_init(self):\n self.assertEqual(self.device_key, self.factory.device_key)", "def __init__(__self__, *,\n ios_app_info: Optional[pulumi.Input['IosAppInfoArgs']] = None,\n ios_robo_test: Optional[pulumi.Input['IosRoboTestArgs']] = None,\n ios_test_loop: Optional[pulumi.Input['IosTestLoopArgs']] = None,\n ios_xc_test: Optional[pulumi.Input['IosXcTestArgs']] = None,\n test_timeout: Optional[pulumi.Input['DurationArgs']] = None):\n if ios_app_info is not None:\n pulumi.set(__self__, \"ios_app_info\", ios_app_info)\n if ios_robo_test is not None:\n pulumi.set(__self__, \"ios_robo_test\", ios_robo_test)\n if ios_test_loop is not None:\n pulumi.set(__self__, \"ios_test_loop\", ios_test_loop)\n if ios_xc_test is not None:\n pulumi.set(__self__, \"ios_xc_test\", ios_xc_test)\n if test_timeout is not None:\n pulumi.set(__self__, \"test_timeout\", test_timeout)" ]
[ "0.80347633", "0.6937742", "0.67790335", "0.6592743", "0.65171486", "0.64225554", "0.63336587", "0.6297807", "0.6291427", "0.62434447", "0.6237877", "0.618715", "0.6131393", "0.6048131", "0.60438824", "0.6021288", "0.5993613", "0.598887", "0.5986037", "0.59799427", "0.5956695", "0.5951244", "0.59048027", "0.58983225", "0.5890639", "0.5873428", "0.58662623", "0.5863986", "0.5835541", "0.5805891" ]
0.78006566
1
Test the __init__ method when parameter 'ip' is None. Since the field is optional, it should pass.
def test_init_no_ip(self): # mocks of files rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3) ap = APInfo(port_id=1, ip=None, mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1', gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file) self.assertEqual(ap.ip, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_no_ip(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)", "def __init__(__self__, *,\n ip: Optional[pulumi.Input['IPAccessControlArgs']] = None):\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def __init__(__self__, *,\n ip: Optional[pulumi.Input['IPAccessControlArgs']] = None):\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def __init__(self) -> None:\n self.ip_address: str | None = None", "def __init__(__self__, *,\n host_ip: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = None):\n if host_ip is not None:\n pulumi.set(__self__, \"host_ip\", host_ip)\n if port is not None:\n pulumi.set(__self__, \"port\", port)", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n if not ipaddress:\n raise SoftDependencyError(\"ipaddress\")\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)", "def __init__(__self__, *,\n ip_tag_type: Optional[pulumi.Input[str]] = None,\n tag: Optional[pulumi.Input[str]] = None):\n if ip_tag_type is not None:\n pulumi.set(__self__, \"ip_tag_type\", ip_tag_type)\n if tag is not None:\n pulumi.set(__self__, \"tag\", tag)", "def __init__(self, allow_none=False):\n if allow_none:\n self.validate = self.validate_none\n self.info = self.info_none\n self.fast_validate = (2, None)\n else:\n self.fast_validate = (2,)", "def __init__(__self__, *,\n destination_pool_name: Optional[pulumi.Input[str]] = None,\n ip: Optional[pulumi.Input[str]] = None):\n if destination_pool_name is not None:\n pulumi.set(__self__, \"destination_pool_name\", destination_pool_name)\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def test_init_no_mac(self):\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip=\"3.3.3.3\", ap_info=self.ap)", "def __init__(self, ip, mask):\n self.vip = ip\n self.mask = mask", "def __init__(__self__, *,\n end_ip: pulumi.Input[str],\n start_ip: pulumi.Input[str]):\n pulumi.set(__self__, \"end_ip\", end_ip)\n pulumi.set(__self__, \"start_ip\", start_ip)", "def __init__(__self__, *,\n host: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[int]] = None):\n if host is not None:\n pulumi.set(__self__, \"host\", host)\n if port is not None:\n pulumi.set(__self__, \"port\", port)", "def __init__(self, ip: str):\n self.api = API(f\"http://{ip}\")\n self.ip = ip\n self.board_data = None", "def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.ip, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n api_client.ip.assert_not_called()", "def 
__init__(__self__, *,\n ignore_missing_v_net_service_endpoint: Optional[pulumi.Input[bool]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None):\n if ignore_missing_v_net_service_endpoint is not None:\n pulumi.set(__self__, \"ignore_missing_v_net_service_endpoint\", ignore_missing_v_net_service_endpoint)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)", "def test_optional_arg(self):\n obj = Base()\n self.assertEqual(obj.id, 1)", "def __init__(__self__, *,\n droplet_id: Optional[pulumi.Input[int]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n urn: Optional[pulumi.Input[str]] = None):\n if droplet_id is not None:\n pulumi.set(__self__, \"droplet_id\", droplet_id)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if urn is not None:\n pulumi.set(__self__, \"urn\", urn)", "def __init__(__self__, *,\n allow: Optional[pulumi.Input[Sequence[pulumi.Input['IPRangeArgs']]]] = None):\n if allow is not None:\n pulumi.set(__self__, \"allow\", allow)", "def __init__(__self__, *,\n region: pulumi.Input[str],\n droplet_id: Optional[pulumi.Input[int]] = None,\n ip_address: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"region\", region)\n if droplet_id is not None:\n pulumi.set(__self__, \"droplet_id\", droplet_id)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)", "def __init__(self):\n self.clean_optional()", "def __init__(__self__, *,\n resourcegroup: pulumi.Input[str],\n server: pulumi.Input[str],\n endipaddress: Optional[pulumi.Input[str]] = None,\n startipaddress: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resourcegroup\", resourcegroup)\n pulumi.set(__self__, \"server\", server)\n if endipaddress is not None:\n pulumi.set(__self__, \"endipaddress\", endipaddress)\n if startipaddress is not None:\n pulumi.set(__self__, \"startipaddress\", startipaddress)", "def __init__(self, ip=None, header=None, hostname=None, aliases=None):\n self.id = header or \"\"\n self.hostname = hostname or \"\"\n self.aliases = aliases or []\n self.ip = \"\"\n if ip:\n try:\n self.ip=ipaddress.ip_address(ip)\n except ipaddress.AddressValueError as e:\n print(\"Error parsing ip address from hosts.conf file\")\n raise e\n self.stored_state = None", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'assign_public_ip': 'bool',\n 'defined_tags': 'dict(str, dict(str, object))',\n 'display_name': 'str',\n 'freeform_tags': 'dict(str, str)',\n 'hostname_label': 'str',\n 'nsg_ids': 'list[str]',\n 'private_ip': 'str',\n 'skip_source_dest_check': 'bool',\n 'subnet_id': 'str'\n }\n\n self.attribute_map = {\n 'assign_public_ip': 'assignPublicIp',\n 'defined_tags': 'definedTags',\n 'display_name': 'displayName',\n 'freeform_tags': 'freeformTags',\n 'hostname_label': 'hostnameLabel',\n 'nsg_ids': 'nsgIds',\n 'private_ip': 'privateIp',\n 'skip_source_dest_check': 'skipSourceDestCheck',\n 'subnet_id': 'subnetId'\n }\n\n self._assign_public_ip = None\n self._defined_tags = None\n self._display_name = None\n self._freeform_tags = None\n self._hostname_label = None\n self._nsg_ids = None\n self._private_ip = None\n self._skip_source_dest_check = None\n self._subnet_id = None", "def __init__(__self__,\n resource_name: str,\n args: ReservedIpArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, **kwargs):\n members = ('ip', 'mac', 'port', 'timeout', 
'flags',\n 'index', 'tag', 'hostname')\n\n # copy the given value or default to None\n for member in members:\n self.__dict__[member] = kwargs.setdefault (member, None)", "def test_init_without_kwargs(self):\n c = City()\n self.assertTrue('id' in c.__dict__)\n self.assertTrue('created_at' in c.__dict__)\n self.assertTrue('updated_at' in c.__dict__)\n self.assertIsInstance(c, BaseModel)" ]
[ "0.7619989", "0.7324193", "0.7324193", "0.7323932", "0.70612985", "0.6996996", "0.6709637", "0.6705524", "0.6618657", "0.65942365", "0.6415844", "0.6387556", "0.6366282", "0.6358961", "0.6319033", "0.6228522", "0.6208983", "0.6139439", "0.6108403", "0.6083423", "0.6066069", "0.60649264", "0.60638696", "0.6060406", "0.5978354", "0.59780127", "0.5971527", "0.59547675", "0.5952458", "0.59514964" ]
0.7339444
1
Test the __init__ method when mandatory parameter 'mac' is None.
def test_init_no_mac(self): with self.assertRaises(ValueError): client = ClientInfo(None, ip="3.3.3.3", ap_info=self.ap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_no_mac(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n with self.assertRaises(ValueError):\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=None, radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)", "def test_init_valid(self):\n payload = payloads.MACResponsePayload(\n self.unique_identifier,\n self.mac_data)\n self.assertEqual(payload.unique_identifier, self.unique_identifier)\n self.assertEqual(payload.mac_data, self.mac_data)", "def test_init_valid(self):\n payload = payloads.MACRequestPayload(\n self.unique_identifier,\n self.cryptographic_parameters,\n self.data)\n self.assertEqual(payload.unique_identifier, self.unique_identifier)\n self.assertEqual(payload.cryptographic_parameters,\n self.cryptographic_parameters)\n self.assertEqual(payload.data, self.data)", "def __init__(__self__, *,\n mac_address: str,\n name: str):\n pulumi.set(__self__, \"mac_address\", mac_address)\n pulumi.set(__self__, \"name\", name)", "def __init__(self, mac):\n self.go_on = True\n self.mac = mac", "def test_prepare_mac_header(self):\n self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)\n self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)\n self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)", "def __init__(__self__, *,\n ios_app_info: Optional[pulumi.Input['IosAppInfoArgs']] = None,\n ios_robo_test: Optional[pulumi.Input['IosRoboTestArgs']] = None,\n ios_test_loop: Optional[pulumi.Input['IosTestLoopArgs']] = None,\n ios_xc_test: Optional[pulumi.Input['IosXcTestArgs']] = None,\n test_timeout: Optional[pulumi.Input['DurationArgs']] = None):\n if ios_app_info is not None:\n pulumi.set(__self__, \"ios_app_info\", ios_app_info)\n if ios_robo_test is not None:\n pulumi.set(__self__, \"ios_robo_test\", ios_robo_test)\n if ios_test_loop is not None:\n pulumi.set(__self__, \"ios_test_loop\", ios_test_loop)\n if ios_xc_test is not None:\n pulumi.set(__self__, \"ios_xc_test\", ios_xc_test)\n if test_timeout is not None:\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)", "def __init__(__self__, *,\n crashed: Optional[pulumi.Input[bool]] = None,\n device_out_of_memory: Optional[pulumi.Input[bool]] = None,\n failed_roboscript: Optional[pulumi.Input[bool]] = None,\n not_installed: Optional[pulumi.Input[bool]] = None,\n other_native_crash: Optional[pulumi.Input[bool]] = None,\n timed_out: Optional[pulumi.Input[bool]] = None,\n unable_to_crawl: Optional[pulumi.Input[bool]] = None):\n if crashed is not None:\n pulumi.set(__self__, \"crashed\", crashed)\n if device_out_of_memory is not None:\n pulumi.set(__self__, \"device_out_of_memory\", device_out_of_memory)\n if failed_roboscript is not None:\n pulumi.set(__self__, \"failed_roboscript\", failed_roboscript)\n if not_installed is not None:\n pulumi.set(__self__, \"not_installed\", not_installed)\n if other_native_crash is not None:\n pulumi.set(__self__, \"other_native_crash\", other_native_crash)\n if 
timed_out is not None:\n pulumi.set(__self__, \"timed_out\", timed_out)\n if unable_to_crawl is not None:\n pulumi.set(__self__, \"unable_to_crawl\", unable_to_crawl)", "def __init__(self, mac, pin, name):\n self._name = name\n self._mac = mac\n self._pin = pin\n self._state = None\n self.update()", "def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def test_init_correct(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(ap.ip, '2.2.2.2')", "def test_read_no_mac_data(self):\n payload = payloads.MACResponsePayload()\n args = (self.encoding_no_mac_data,)\n self.assertRaisesRegex(\n exceptions.InvalidKmipEncoding,\n \"expected mac response mac data not found\",\n payload.read,\n *args\n )", "def __init__(__self__, *,\n android_test: Optional[pulumi.Input['AndroidTestArgs']] = None,\n ios_test: Optional[pulumi.Input['IosTestArgs']] = None):\n if android_test is not None:\n pulumi.set(__self__, \"android_test\", android_test)\n if ios_test is not None:\n pulumi.set(__self__, \"ios_test\", ios_test)", "def test_init():\n machine = Machine(['a', 'b', 'c', '_'])\n assert machine.alphabet == ['a', 'b', 'c', '_']\n assert machine.head is None\n assert machine.state is None\n assert machine.tape is None", "def test_init_no_ip(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)", "def test_init(self):\n self.assertEqual(self.device_key, self.factory.device_key)", "def __init__(self) -> None:\n super().__init__()\n self.version = 6\n (self.ofproto, self.ofparser) = ofproto_protocol._versions[self.version]\n self.mac_to_port = {}", "def __init__(__self__, *,\n crashed: bool,\n device_out_of_memory: bool,\n failed_roboscript: bool,\n not_installed: bool,\n other_native_crash: bool,\n timed_out: bool,\n unable_to_crawl: bool):\n pulumi.set(__self__, \"crashed\", crashed)\n pulumi.set(__self__, \"device_out_of_memory\", device_out_of_memory)\n pulumi.set(__self__, \"failed_roboscript\", failed_roboscript)\n pulumi.set(__self__, \"not_installed\", not_installed)\n pulumi.set(__self__, \"other_native_crash\", other_native_crash)\n pulumi.set(__self__, \"timed_out\", timed_out)\n pulumi.set(__self__, \"unable_to_crawl\", unable_to_crawl)", "def _check_init(self):\n # instantiate the guest class we want to test\n system_name = 'dummy_system'\n host_name = 'dummy.domain.com'\n user = 'root'\n passwd = 'somepwd'\n extensions = {}\n guest_obj = linux.GuestLinux(\n system_name, host_name, user, passwd, extensions)\n\n # validate if attributes were correctly assigned to object\n self.assertEqual('linux', guest_obj.GUEST_ID)\n self.assertIs(system_name, guest_obj.name)\n self.assertIs(host_name, guest_obj.host_name)\n self.assertIs(user, guest_obj.user)\n self.assertIs(passwd, guest_obj.passwd)\n self.assertIs(extensions, guest_obj.extensions)\n\n # return object for further testing\n return guest_obj", "def __init__(__self__, *,\n incompatible_app_version: Optional[pulumi.Input[bool]] = None,\n incompatible_architecture: Optional[pulumi.Input[bool]] = None,\n incompatible_device: 
Optional[pulumi.Input[bool]] = None):\n if incompatible_app_version is not None:\n pulumi.set(__self__, \"incompatible_app_version\", incompatible_app_version)\n if incompatible_architecture is not None:\n pulumi.set(__self__, \"incompatible_architecture\", incompatible_architecture)\n if incompatible_device is not None:\n pulumi.set(__self__, \"incompatible_device\", incompatible_device)", "def test__init__raise_exception(self):\n self.assertRaises(TypeError, MasterNodeInterface)", "def __init__(self, router: NetgearRouter, device: dict) -> None:\n self._router = router\n self._device = device\n self._mac = device[\"mac\"]\n self._name = self.get_device_name()\n self._device_name = self._name\n self._unique_id = self._mac\n self._active = device[\"active\"]", "def __init__(self, dmac: bytes, smac: bytes, typ: int, payload: bytes):\n self.dmac = dmac\n self.smac = smac\n self.typ = typ\n self.payload = payload", "def test_init_wrong_ap_type(self):\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\",\n ip=\"3.3.3.3\", ap_info=ap_wrong)", "def test_init(self):\n self.assertEqual(self.foo._base_cmd, 'sleep 10; hostname')\n self.assertEqual(self.foo._base_args, {})\n self.assertEqual(self.foo.InputArgs, {})\n self.assertEqual(self.foo.OracleJobName, 'job1')", "def __init__(self, bus=None, alternativeAddress=False):\n super().__init__(bus, alternativeAddress)", "def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = ethernet_port_prop", "def cna(mac):\n return mock.Mock(spec=pvm_net.CNA, mac=mac, vswitch_uri='fake_href')", "def __init__(self, fake_apple_tv):\n self.device = fake_apple_tv" ]
[ "0.7700126", "0.718434", "0.7076505", "0.69038445", "0.68528235", "0.64499795", "0.6309808", "0.6301054", "0.6203087", "0.616466", "0.6155142", "0.61442065", "0.61114347", "0.609657", "0.607744", "0.6011186", "0.6001735", "0.59643024", "0.5931269", "0.5910585", "0.5905488", "0.5900017", "0.58970076", "0.5887437", "0.5868467", "0.58156866", "0.58098906", "0.5798898", "0.57897395", "0.57893056" ]
0.79045194
0
Test the __init__ method when parameter 'ip' is None. Since the field is optional, it should pass.
def test_init_no_ip(self): client = ClientInfo("cc:cc:cc:cc:cc:cc", ip=None, ap_info=self.ap) self.assertEqual(client.ip, None) self.assertEqual(client.ip_bytes, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)", "def __init__(__self__, *,\n ip: Optional[pulumi.Input['IPAccessControlArgs']] = None):\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def __init__(__self__, *,\n ip: Optional[pulumi.Input['IPAccessControlArgs']] = None):\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def __init__(self) -> None:\n self.ip_address: str | None = None", "def __init__(__self__, *,\n host_ip: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = None):\n if host_ip is not None:\n pulumi.set(__self__, \"host_ip\", host_ip)\n if port is not None:\n pulumi.set(__self__, \"port\", port)", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n if not ipaddress:\n raise SoftDependencyError(\"ipaddress\")\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)", "def __init__(__self__, *,\n ip_tag_type: Optional[pulumi.Input[str]] = None,\n tag: Optional[pulumi.Input[str]] = None):\n if ip_tag_type is not None:\n pulumi.set(__self__, \"ip_tag_type\", ip_tag_type)\n if tag is not None:\n pulumi.set(__self__, \"tag\", tag)", "def __init__(self, allow_none=False):\n if allow_none:\n self.validate = self.validate_none\n self.info = self.info_none\n self.fast_validate = (2, None)\n else:\n self.fast_validate = (2,)", "def __init__(__self__, *,\n destination_pool_name: Optional[pulumi.Input[str]] = None,\n ip: Optional[pulumi.Input[str]] = None):\n if destination_pool_name is not None:\n pulumi.set(__self__, \"destination_pool_name\", destination_pool_name)\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def test_init_no_mac(self):\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip=\"3.3.3.3\", ap_info=self.ap)", "def __init__(self, ip, mask):\n self.vip = ip\n self.mask = mask", "def __init__(__self__, *,\n end_ip: pulumi.Input[str],\n start_ip: pulumi.Input[str]):\n pulumi.set(__self__, \"end_ip\", end_ip)\n pulumi.set(__self__, \"start_ip\", start_ip)", "def __init__(__self__, *,\n host: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[int]] = None):\n if host is not None:\n pulumi.set(__self__, \"host\", host)\n if port is not None:\n pulumi.set(__self__, \"port\", port)", "def __init__(self, ip: str):\n self.api = API(f\"http://{ip}\")\n self.ip = ip\n self.board_data = None", "def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n 
sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.ip, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n api_client.ip.assert_not_called()", "def __init__(__self__, *,\n ignore_missing_v_net_service_endpoint: Optional[pulumi.Input[bool]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None):\n if ignore_missing_v_net_service_endpoint is not None:\n pulumi.set(__self__, \"ignore_missing_v_net_service_endpoint\", ignore_missing_v_net_service_endpoint)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)", "def test_optional_arg(self):\n obj = Base()\n self.assertEqual(obj.id, 1)", "def __init__(__self__, *,\n droplet_id: Optional[pulumi.Input[int]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n urn: Optional[pulumi.Input[str]] = None):\n if droplet_id is not None:\n pulumi.set(__self__, \"droplet_id\", droplet_id)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if urn is not None:\n pulumi.set(__self__, \"urn\", urn)", "def __init__(__self__, *,\n allow: Optional[pulumi.Input[Sequence[pulumi.Input['IPRangeArgs']]]] = None):\n if allow is not None:\n pulumi.set(__self__, \"allow\", allow)", "def __init__(__self__, *,\n region: pulumi.Input[str],\n droplet_id: Optional[pulumi.Input[int]] = None,\n ip_address: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"region\", region)\n if droplet_id is not None:\n pulumi.set(__self__, \"droplet_id\", droplet_id)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)", "def __init__(self):\n self.clean_optional()", "def __init__(__self__, *,\n resourcegroup: pulumi.Input[str],\n server: pulumi.Input[str],\n endipaddress: Optional[pulumi.Input[str]] = None,\n startipaddress: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resourcegroup\", resourcegroup)\n pulumi.set(__self__, \"server\", server)\n if endipaddress is not None:\n pulumi.set(__self__, \"endipaddress\", endipaddress)\n if startipaddress is not None:\n pulumi.set(__self__, \"startipaddress\", startipaddress)", "def __init__(self, ip=None, header=None, hostname=None, aliases=None):\n self.id = header or \"\"\n self.hostname = hostname or \"\"\n self.aliases = aliases or []\n self.ip = \"\"\n if ip:\n try:\n self.ip=ipaddress.ip_address(ip)\n except ipaddress.AddressValueError as e:\n print(\"Error parsing ip address from hosts.conf file\")\n raise e\n self.stored_state = None", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'assign_public_ip': 'bool',\n 'defined_tags': 'dict(str, dict(str, object))',\n 'display_name': 'str',\n 'freeform_tags': 'dict(str, str)',\n 'hostname_label': 'str',\n 'nsg_ids': 'list[str]',\n 'private_ip': 'str',\n 'skip_source_dest_check': 'bool',\n 'subnet_id': 'str'\n }\n\n self.attribute_map = {\n 'assign_public_ip': 'assignPublicIp',\n 'defined_tags': 'definedTags',\n 'display_name': 'displayName',\n 'freeform_tags': 'freeformTags',\n 'hostname_label': 'hostnameLabel',\n 'nsg_ids': 'nsgIds',\n 'private_ip': 'privateIp',\n 'skip_source_dest_check': 'skipSourceDestCheck',\n 'subnet_id': 'subnetId'\n }\n\n self._assign_public_ip = None\n self._defined_tags = None\n self._display_name = None\n self._freeform_tags = None\n self._hostname_label = None\n self._nsg_ids = None\n self._private_ip = None\n 
self._skip_source_dest_check = None\n self._subnet_id = None", "def __init__(__self__,\n resource_name: str,\n args: ReservedIpArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, **kwargs):\n members = ('ip', 'mac', 'port', 'timeout', 'flags',\n 'index', 'tag', 'hostname')\n\n # copy the given value or default to None\n for member in members:\n self.__dict__[member] = kwargs.setdefault (member, None)", "def test_init_without_kwargs(self):\n c = City()\n self.assertTrue('id' in c.__dict__)\n self.assertTrue('created_at' in c.__dict__)\n self.assertTrue('updated_at' in c.__dict__)\n self.assertIsInstance(c, BaseModel)" ]
[ "0.7339444", "0.7324193", "0.7324193", "0.7323932", "0.70612985", "0.6996996", "0.6709637", "0.6705524", "0.6618657", "0.65942365", "0.6415844", "0.6387556", "0.6366282", "0.6358961", "0.6319033", "0.6228522", "0.6208983", "0.6139439", "0.6108403", "0.6083423", "0.6066069", "0.60649264", "0.60638696", "0.6060406", "0.5978354", "0.59780127", "0.5971527", "0.59547675", "0.5952458", "0.59514964" ]
0.7619989
0
Test the __init__ method when mandatory parameter 'ap_info' is of wrnong type.
def test_init_wrong_ap_type(self): ap_wrong = object() with self.assertRaises(ValueError): client = ClientInfo("cc:cc:cc:cc:cc:cc", ip="3.3.3.3", ap_info=ap_wrong)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_correct(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(ap.ip, '2.2.2.2')", "def test_accepts_init_with_strict_subset_of_args(self):\n\n class API(platform.PlatformAPI):\n def __init__(self, base_url):\n pass\n\n api = API(\"some-url\")\n assert isinstance(api, platform.PlatformAPI)", "def test_init_no_mac(self):\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip=\"3.3.3.3\", ap_info=self.ap)", "def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)", "def test_init_no_mac(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n with self.assertRaises(ValueError):\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=None, radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)", "def __init__(self, apicrap):\n pass", "def __init__(self, apicrap):\n pass", "def __init__(__self__, *,\n android_app_info: 'outputs.AndroidAppInfoResponse',\n android_instrumentation_test: 'outputs.AndroidInstrumentationTestResponse',\n android_robo_test: 'outputs.AndroidRoboTestResponse',\n android_test_loop: 'outputs.AndroidTestLoopResponse',\n test_timeout: 'outputs.DurationResponse'):\n pulumi.set(__self__, \"android_app_info\", android_app_info)\n pulumi.set(__self__, \"android_instrumentation_test\", android_instrumentation_test)\n pulumi.set(__self__, \"android_robo_test\", android_robo_test)\n pulumi.set(__self__, \"android_test_loop\", android_test_loop)\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def test_init(self):\n msg = 'Object is not a top.AdpB2CConfig'\n self.assertIsInstance(self._c, top.AdpB2CConfig, msg)", "def test_init_no_ip(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)", "def __init__(__self__, *,\n android_app_info: Optional[pulumi.Input['AndroidAppInfoArgs']] = None,\n android_instrumentation_test: Optional[pulumi.Input['AndroidInstrumentationTestArgs']] = None,\n android_robo_test: Optional[pulumi.Input['AndroidRoboTestArgs']] = None,\n android_test_loop: Optional[pulumi.Input['AndroidTestLoopArgs']] = None,\n test_timeout: Optional[pulumi.Input['DurationArgs']] = None):\n if android_app_info is not None:\n pulumi.set(__self__, \"android_app_info\", android_app_info)\n if android_instrumentation_test is not None:\n pulumi.set(__self__, \"android_instrumentation_test\", android_instrumentation_test)\n if android_robo_test is not None:\n pulumi.set(__self__, \"android_robo_test\", android_robo_test)\n if android_test_loop is not None:\n pulumi.set(__self__, \"android_test_loop\", android_test_loop)\n if test_timeout is not None:\n pulumi.set(__self__, \"test_timeout\", test_timeout)", 
"def from_info(self, info: Dict[str, Any]) -> None:", "def __init__(__self__, *,\n ios_app_info: 'outputs.IosAppInfoResponse',\n ios_robo_test: 'outputs.IosRoboTestResponse',\n ios_test_loop: 'outputs.IosTestLoopResponse',\n ios_xc_test: 'outputs.IosXcTestResponse',\n test_timeout: 'outputs.DurationResponse'):\n pulumi.set(__self__, \"ios_app_info\", ios_app_info)\n pulumi.set(__self__, \"ios_robo_test\", ios_robo_test)\n pulumi.set(__self__, \"ios_test_loop\", ios_test_loop)\n pulumi.set(__self__, \"ios_xc_test\", ios_xc_test)\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def testApmonInstance(self):\n with DashboardAPI() as dashboard:\n self.assertTrue(dashboard.apmon.initializedOK())", "def test_init_correct(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=\"3.3.3.3\", ap_info=self.ap)\n self.assertEqual(client.ip, \"3.3.3.3\")\n self.assertEqual(client.ip_bytes, b'\\x03\\x03\\x03\\x03')", "def test_init_arg(self):\n b1 = BaseModel(12)\n self.assertEqual(type(b1).__name__, \"BaseModel\")\n self.assertFalse(hasattr(b1, \"12\"))", "def test_constructor_fill_fields(self):\r\n builder = IndicatorBuilder(\"url\")\r\n\r\n self.assertEqual(builder.url, \"url\")\r\n self.assertEqual(builder.data, {})", "def test_init(self):\n\n class TestResource(BaseResource):\n\n name = 'test_resource'\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n route = '/a_route'\n TestResource.init(api, route)\n\n # validate the attribute values of the class\n self.assertEqual(api, TestResource.api)\n self.assertEqual(route, TestResource.route)\n self.assertEqual(api.mongodb, TestResource.mongodb)\n self.assertEqual(api.conf, TestResource.conf)\n self.assertEqual('http://an_endpoint/a_route', TestResource.endpoint)\n self.assertEqual('test_resource', TestResource.logger.name)", "def test_init(self):\r\n p = Aligner({})\r\n self.assertEqual(p.Name, 'Aligner')\r\n self.assertEqual(p.Params, {})", "def test_004_init(self):\n self.assertIsInstance(rawdata.rawdata(\"id\",testRawdata.ioc,testRawdata.sam),rawdata.rawdata)", "def test_init_kwarg(self):\n b1 = BaseModel(name=\"Red\")\n self.assertEqual(type(b1).__name__, \"BaseModel\")\n self.assertTrue(hasattr(b1, \"name\"))\n self.assertTrue(hasattr(b1, \"__class__\"))\n self.assertFalse(hasattr(b1, \"id\"))\n self.assertFalse(hasattr(b1, \"created_at\"))\n self.assertFalse(hasattr(b1, \"updated_at\"))", "def test_instantiation(self):\n self.assertIsInstance(self.amenity, Amenity)", "def __init__(self, data: dict):\n super().__init__(data)\n self._supports_validation = False\n self._ping_data_raw = data['pingData']", "def test_raises_when_init_has_superset_of_args(self):\n\n with pytest.raises(exceptions.APIImplementationError) as exc_info:\n\n class API(platform.PlatformAPI):\n def __init__(self, base_url, token, org_name, user, other):\n pass\n\n assert \"other\" in str(exc_info.value)", "def __init__(__self__, *,\n ios_app_info: Optional[pulumi.Input['IosAppInfoArgs']] = None,\n ios_robo_test: Optional[pulumi.Input['IosRoboTestArgs']] = None,\n ios_test_loop: Optional[pulumi.Input['IosTestLoopArgs']] = None,\n ios_xc_test: Optional[pulumi.Input['IosXcTestArgs']] = None,\n test_timeout: Optional[pulumi.Input['DurationArgs']] = None):\n if ios_app_info is not None:\n pulumi.set(__self__, \"ios_app_info\", ios_app_info)\n if ios_robo_test is not None:\n pulumi.set(__self__, \"ios_robo_test\", ios_robo_test)\n if ios_test_loop is not None:\n pulumi.set(__self__, \"ios_test_loop\", ios_test_loop)\n if ios_xc_test 
is not None:\n pulumi.set(__self__, \"ios_xc_test\", ios_xc_test)\n if test_timeout is not None:\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def test_init(self):\n with self.assertRaises(ValueError):\n TraxionPay(api_key=self.api_key)", "def test_003_init(self):\n self.assertRaises(TypeError,rawdata.rawdata,\"id\",testRawdata.ioc)", "def __init__(self, minfo=None):\n\n if minfo is None:\n minfo = {}\n self.verb = minfo.get('verb', 'reg')\n self.whitelist_name = minfo.get('whitelist_name', '')\n self.permissioned_public_keys = minfo.get('permissioned_public_keys',\n NullIdentifier)\n self.permissioned_addrs = minfo.get('permissioned_addrs',\n NullIdentifier)", "def __init__(self, info=None):\n \n self.astral = None\n if info is None:\n self.name = 'Greenwich'\n self.country = 'England'\n self._latitude = 51.168\n self._longitude = 0\n self._timezone_group = 'Europe'\n self._timezone_location = 'London'\n else:\n self._latitude = 0\n self._longitude = 0\n self._timezone_group = ''\n self._timezone_location = ''\n\n try:\n self.name = info[0].encode('utf-8')\n self.country = info[1].encode('utf-8')\n self.latitude = info[2]\n self.longitude = info[3]\n self.timezone = info[4]\n except:\n pass", "def test_init(self):\n ar = awstats_reader.AwstatsReader('/tmp', 'example.com')\n self.assertTrue(isinstance(ar, awstats_reader.AwstatsReader))" ]
[ "0.6921114", "0.65740025", "0.6484534", "0.6405382", "0.6275763", "0.6217997", "0.6217997", "0.6214719", "0.6097751", "0.6065284", "0.6019445", "0.5953711", "0.59293115", "0.5921581", "0.5915253", "0.58873135", "0.5881595", "0.58771056", "0.5855381", "0.5837291", "0.57922626", "0.5787745", "0.5729878", "0.5721052", "0.570423", "0.5699153", "0.5683576", "0.5614336", "0.5587444", "0.5586952" ]
0.7740714
0
Generator that randomly picks a banner.
def p_banner(): return random.choice([banner, banner_two, banner_three, banner_four, banner_five])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_banner(banners):\n # simple random\n n = random.randint(0, len(banners)-1)\n return banners[n]", "def random_agent(bandit, iterations):\n\n for i in range(iterations):\n a = random.choice(bandit.actions)\n r = bandit.sample(a)\n yield a, r", "def __call__(self):\n return random.choice(self.fakers)", "def randomHelmet():\n return random.choice(HELMETS)", "def base_pick():\n\n rnd = generate_random(2, 15)\n return rnd", "def _get_random_bandit(self)-> Bandit:\n return np.random.choice(self.bandits)", "def getRandom(self) -> int:\n # print(self.ind)\n return choice(self.items)", "def rand(self):\n raise NotImplementedError", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def banner():\n\n def random_color():\n valid_colors = (\"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\")\n return random.choice(valid_colors)\n\n autoRecon = rf\"\"\"\n _____________ ____ ________________\n /___/___ \\ / / | /___/__ \\ Mr.P-Millz _____\n O.G./ / _ \\______/__/ |______|__|_____ * \\_________________/__/ |___\n __/__/ /_\\ \\ | | \\ __\\/ _ \\| | __/ __ \\_/ ___\\/ _ \\| |\n | | ___ \\| | /| | ( |_| ) | | \\ ___/\\ \\__( |_| ) | |\n |___|____/\\__\\____|____/_|__|\\_\\____/|__|____|_ /\\___ |\\___ \\____/|___| /\n gtihub.com/Knowledge-Wisdom-Understanding \\___\\/ \\__\\/ \\__\\_/ v{V} \\___\\/\n\n\"\"\"\n\n def print_art(msg, color):\n colored_art = colored(msg, color=color)\n print(colored_art)\n\n color = random_color()\n print_art(autoRecon, color)", "def generate(count):\n return unpack_random_animals(generate_animals_randomly(count))", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def create_banner_list():\n template_vars = {\n 'title' : 'Banners - ' + sitesettings.SITE_NAME,\n 'siteurl' : sitesettings.SITE_URL,\n 'sitename' : sitesettings.SITE_NAME,\n 'meta_desc' : 'List of step-up banners in Final Fantasy Brave Exvius (FFBE)',\n 'last_four_banners' : nav.get_last_four_banners('all'),\n 'all_banner_info' : get_all_banner_info(),\n }\n\n bn_path = os.path.join(sitesettings.LOCAL_FILE_PATH, 'banner')\n\n if not os.path.exists(bn_path):\n os.makedirs(bn_path)\n\n template_file = 'bannerlist.html'\n html_file_loc = os.path.join(bn_path, 'index.html')\n generatehtml.generate_html(\n html_file_loc, template_file, template_vars, os.path.join(os.getcwd(), 'templates'))", "def getRandom(self) -> int:\n return choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.items)", "def getRandom(self):\n return random.choice(self.ls)", "def sample(self):\n return gc.rand_state.choice(self.domain)", "def randomLeggings():\n return random.choice(LEGGINGS)", "def spinit(list):\n return (random.choice(list))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def getRandomAd(self, authenticationToken, adParameters):\r\n pass", "def sample_randint(a, b):\n return a + sample(b - a + 1)", "def randomize_herbs(self):\n random.shuffle(self.herbivores)", "def random(self) -> Gadza:\n return choice(self.gadzas)", "def make_music_rand():\n pass", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)" ]
[ "0.83091736", "0.59306204", "0.58803475", "0.5830148", "0.5823706", "0.5815695", "0.5790653", "0.578291", "0.57775927", "0.57775927", "0.5766402", "0.5765952", "0.57585686", "0.5751899", "0.571453", "0.57093734", "0.5685661", "0.5679009", "0.5675502", "0.5662307", "0.5655421", "0.5635683", "0.5608538", "0.5607601", "0.56056505", "0.5557035", "0.5546172", "0.55373275", "0.55373275", "0.55373275" ]
0.82409924
1
Return the estimation of the frequncy of _item
def estimate(self, item): return self.A[item] if item in self.A.keys() else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSupport(item):\n return float(freqSet[item]) / len(transactionList)", "def frequency(my_list, item):\n return float(my_list.count(item)) / float(len(my_list))", "def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total", "def getSupport(item):\n # return float(freqSet[item])/len(transactionList)\n return float(freqSet[item]) / len(dateStampSet)", "def freq():", "def freq(self) -> int:", "def percentage(my_list, item):\n return 100.0 * frequency(my_list, item)", "def getSupport(self, item):\n return self.itemCountDict[item] / self.transLength", "def GetFrequency(self):\n ...", "def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)", "def freq(self, x):\n return self.d.get(x, 0)", "def get_frequency(self):\r\n return self.f", "def freq(self, value: int, /) -> None:", "def flajolet_martin(self, item):\n for i, seed in enumerate(self.random_seeds):\n signature = xxh64(item, seed=seed).intdigest()\n new_est = _tail_length(signature)\n if new_est > self.fm_estimates[i]:\n self.fm_estimates[i] = new_est", "def getFreq(self,):\n\t\treturn self.freq;", "def frequency(self):\n return float(self.get_frequency())", "def aitemfreq(a):\r\n scores = pstats.aunique(a)\r\n scores = N.sort(scores)\r\n freq = N.zeros(len(scores))\r\n for i in range(len(scores)):\r\n freq[i] = N.add.reduce(N.equal(a,scores[i]))\r\n return N.array(pstats.aabut(scores, freq))", "def test_get_tax_return_frequencies_key(self):\n pass", "def compute_frequency(record):\n try:\n info = record.info\n except:\n info = record.INFO\n\n alt_freq = [float(count) / info[\"DP\"] for count in info[\"AO\"]]\n return alt_freq", "def test_get_tax_return_frequencies(self):\n pass", "def score(item, fd, key):\n return fd.get(key(item), 0)", "def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]", "def surprisal(self, item):\n return - math.log(self.probability(item))", "def get_freq(self):\n return self.freq", "def frequency(self):\n return infer_frequency(self._obj, 'ignore')", "def freq(self, freq=None):\n raise NotImplementedError()", "def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440", "def freq(self, frequency: Optional[int]):", "def _test_get_a_freq(self):\n for i, energy_test in enumerate(self.energy_list):\n if i+1 < len(self.energy_list):\n energy = (self.energy_list[i+1]+self.energy_list[i])/2.\n else:\n energy = (-self.energy_list[i-1]+2.*self.energy_list[i])\n energy_test_2 = energy_test-1e-2\n print i, energy_test_2, self._get_a_freq(energy_test_2)\n print i, energy_test, self._get_a_freq(energy_test)\n energy_test_2 = energy_test+1e-2\n print i, energy_test_2, self._get_a_freq(energy_test_2)\n print i, energy_test, 1./self.closed_orbits_t[energy_test]\n print i, energy, self._get_a_freq(energy)\n print", "def freq(self, freq: Optional[int] = None) -> Optional[int]:\n ..." ]
[ "0.7510796", "0.69838566", "0.6894229", "0.68342465", "0.65238917", "0.6520979", "0.65185326", "0.6337576", "0.6205688", "0.6191449", "0.618391", "0.61624837", "0.60866416", "0.6069064", "0.6059242", "0.60343236", "0.6033009", "0.5995923", "0.59935904", "0.58633614", "0.5861668", "0.584851", "0.584245", "0.5834996", "0.5822483", "0.5810441", "0.58063024", "0.57665586", "0.5751514", "0.5701219" ]
0.71200216
1
Create two sidewalk nodes from three nodes in a street.
def make_sidewalk_nodes(street, prev_node, curr_node, next_node): if prev_node is None: v = - curr_node.vector_to(next_node, normalize=False) vec_prev = curr_node.vector() + v prev_node = Node(None, vec_prev[0], vec_prev[1]) elif next_node is None: v = - curr_node.vector_to(prev_node, normalize=False) vec_next = curr_node.vector() + v next_node = Node(None, vec_next[0], vec_next[1]) curr_latlng = np.array(curr_node.location()) v_cp_n = curr_node.vector_to(prev_node, normalize=True) v_cn_n = curr_node.vector_to(next_node, normalize=True) v_sidewalk = v_cp_n + v_cn_n if np.linalg.norm(v_sidewalk) < 1e-10: v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]]) else: v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk) p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n p_sidewalk_1 = Node(None, p1[0], p1[1]) p_sidewalk_2 = Node(None, p2[0], p2[1]) curr_node.append_sidewalk_node(street.id, p_sidewalk_1) curr_node.append_sidewalk_node(street.id, p_sidewalk_2) # Figure out on which side you want to put each sidewalk node v_c1 = curr_node.vector_to(p_sidewalk_1) if np.cross(v_cn_n, v_c1) > 0: return p_sidewalk_1, p_sidewalk_2 else: return p_sidewalk_2, p_sidewalk_1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_crosswalks(street_network, sidewalk_network):\n\n intersection_nodes = street_network.nodes.get_intersection_nodes()\n # intersection_nodes = [street_network.nodes.get(nid) for nid in intersection_node_ids]\n\n # Create sidewalk nodes for each intersection node and overwrite the adjacency information\n for intersection_node in intersection_nodes:\n try:\n adj_street_nodes = street_network.get_adjacent_nodes(intersection_node)\n adj_street_nodes = sort_nodes(intersection_node, adj_street_nodes)\n v_curr = intersection_node.vector()\n\n if len(adj_street_nodes) == 3:\n # Take care of the case where len(adj_nodes) == 3.\n # Identify the largest angle that are formed by three segments\n # Make a dummy node between two vectors that form the largest angle\n # Using the four nodes (3 original nodes and a dummy node), create crosswalk nodes\n vectors = [intersection_node.vector_to(adj_street_node, normalize=True) for adj_street_node in adj_street_nodes]\n angles = [math.acos(np.dot(vectors[i - 1], vectors[i])) for i in range(3)]\n idx = np.argmax(angles)\n vec_idx = (idx + 1) % 3\n dummy_vector = - vectors[vec_idx] * distance_to_sidewalk\n inverse_vec = - vectors[vec_idx]\n # dummy_vector = inverse_vec * latlng_offset_size(vectors[vec_idx][1], vectors[vec_idx][0],\n # vector=inverse_vec,\n # distance=distance_to_sidewalk)\n dummy_coordinate_vector = v_curr + dummy_vector\n dummy_node = Node(None, dummy_coordinate_vector[0], dummy_coordinate_vector[1])\n adj_street_nodes.insert(idx, dummy_node)\n\n # Create crosswalk nodes and add a cross walk to the data structure\n try:\n crosswalk_nodes = make_crosswalk_nodes(intersection_node, adj_street_nodes)\n except ValueError:\n raise\n\n crosswalk_node_ids = [node.id for node in crosswalk_nodes]\n crosswalk_node_ids.append(crosswalk_node_ids[0])\n # crosswalk = Sidewalk(None, crosswalk_node_ids, \"crosswalk\")\n\n # Add nodes to the network\n for crosswalk_node in crosswalk_nodes:\n sidewalk_network.add_node(crosswalk_node)\n sidewalk_network.nodes.crosswalk_node_ids.append(crosswalk_node.id)\n\n # Add crosswalks to the network\n crosswalk_node_id_pairs = window(crosswalk_node_ids, 2)\n for node_id_pair in crosswalk_node_id_pairs:\n n1 = sidewalk_network.nodes.get(node_id_pair[0])\n n2 = sidewalk_network.nodes.get(node_id_pair[1])\n if len(n1.get_way_ids()) == 1 and len(n2.get_way_ids()) == 1:\n crosswalk = Sidewalk(None, list(node_id_pair), \"footway\")\n else:\n crosswalk = Sidewalk(None, list(node_id_pair), \"crosswalk\")\n sidewalk_network.add_way(crosswalk)\n\n # Connect the crosswalk nodes with correct sidewalk nodes\n connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids)\n except ValueError:\n log.exception(\"ValueError in make_sidewalks, so skipping...\")\n continue\n return", "def make_crosswalk_nodes(intersection_node, adj_street_nodes):\n if len(adj_street_nodes) < 4:\n raise ValueError(\"You need to pass 4 or more nodes for adj_street_nodes \")\n\n crosswalk_nodes = []\n for i in range(len(adj_street_nodes)):\n n1 = adj_street_nodes[i - 1]\n n2 = adj_street_nodes[i]\n crosswalk_node = make_crosswalk_node(intersection_node, n1, n2)\n\n # Keep track of from which streets the crosswalk nodes are created.\n way_ids = []\n for wid in n1.get_way_ids():\n way_ids.append(wid)\n for wid in n2.get_way_ids():\n way_ids.append(wid)\n way_ids = intersection_node.get_shared_way_ids(way_ids)\n\n crosswalk_node.way_ids = way_ids\n crosswalk_nodes.append(crosswalk_node)\n crosswalk_node.parents = (intersection_node, n1, n2)\n\n return 
crosswalk_nodes", "def connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids):\n # crosswalk_node_ids = crosswalk.get_node_ids()[:-1] # Crosswalk has a redundant node at the end.\n\n for crosswalk_node_id in crosswalk_node_ids[:-1]:\n try:\n # Get the intersection node and two nodes that created the intersection sidewalk node\n crosswalk_node = sidewalk_network.nodes.get(crosswalk_node_id)\n intersection_node, adjacent_street_node1, adjacent_street_node2 = crosswalk_node.parents\n\n # Connect sidewalk nodes created from adjacent_street_node1 and adjacent_street_node2\n # Get sidewalk nodes that are created from the street node, and\n # identify which one should be connected to crosswalk_node\n for adjacent_street_node in [adjacent_street_node1, adjacent_street_node2]:\n # Skip the dummy node\n if len(adjacent_street_node.get_way_ids()) == 0:\n continue\n\n # Create a vector from the intersection node to the adjacent street node.\n # Then also create a vector from the intersection node to the sidewalk node\n v_adjacent_street_node = intersection_node.vector_to(adjacent_street_node, normalize=True)\n shared_street_id = intersection_node.get_shared_way_ids(adjacent_street_node)[0]\n try:\n sidewalk_node_1_from_intersection, sidewalk_node_2_from_intersection = intersection_node.get_sidewalk_nodes(shared_street_id)\n except TypeError:\n # Todo: Issue #29. Sometimes shared_street_id does not exist in the intersection_node.\n log.exception(\"connect_crosswalk_nodes(): shared_street_id %s does not exist.\" % shared_street_id)\n continue\n v_sidewalk_node_1_from_intersection = intersection_node.vector_to(sidewalk_node_1_from_intersection, normalize=True)\n\n # Check which one of sidewalk_node_1_from_intersection and sidewalk_node_2_from_intersection are\n # on the same side of the road with crosswalk_node.\n # If the sign of the cross product from v_adjacent_street_node to v_crosswalk_node is same as\n # that of v_adjacent_street_node to v_sidewalk_node_1_from_intersection, then\n # sidewalk_node_1_from_intersection should be on the same side.\n # Otherwise, sidewalk_node_2_from_intersection should be on the same side with crosswalk_node.\n v_crosswalk_node = intersection_node.vector_to(crosswalk_node, normalize=True)\n if np.cross(v_adjacent_street_node, v_crosswalk_node) * np.cross(v_adjacent_street_node, v_sidewalk_node_1_from_intersection) > 0:\n node_to_swap = sidewalk_node_1_from_intersection\n else:\n node_to_swap = sidewalk_node_2_from_intersection\n\n sidewalk_network.swap_nodes(node_to_swap, crosswalk_node)\n except ValueError:\n log.exception(\"Error while connecting crosswalk nodes, so skipping...\")\n continue\n return", "def make_crosswalk_node(node, n1, n2):\n v_curr = node.vector()\n\n v1 = node.vector_to(n1, normalize=True)\n v2 = node.vector_to(n2, normalize=True)\n v = v1 + v2\n v /= np.linalg.norm(v) # Normalize the vector\n v_new = v_curr + v * 0.00011\n # v_new = v_curr + np.array(latlng_offset(v_curr[0], vector=v, distance=7))\n return Node(None, v_new[0], v_new[1])", "def parse_street_waynodes(input, use_highway):\r\n way_key = use_highway and name_highway_key or name_key\r\n rels, ways, nodes = ParserOSM().parse(input, way_key=way_key)\r\n \r\n return ways, nodes", "def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in 
ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph", "def setup_nodes(server_node, road_node, road_segment_point):\n\n # Create working server node data subset excluding unneeded fields\n snode = [{'id': i,\n 'shapeid': item['shapeid'],\n 'x': float(item['x']),\n 'y': float(item['y']),\n 'nearest_pt': None,\n 'nearest_pt_sl_dist': float(\"inf\"),\n 'color_num': (int(item['shapeid']) + CM_OFFSET) % NUM_COLORS}\n for i, item in enumerate(server_node)]\n\n # Create working road node data subset excluding unneeded fields\n # (note: road nodes consist of junctions and road ends)\n rnode = []\n prev_coord = []\n for i, item in enumerate(road_node):\n if [item['x'], item['y']] not in prev_coord: # exclude duplicates\n rnode.append({'id': i,\n 'shapeid': item['shapeid'],\n 'x': float(item['x']),\n 'y': float(item['y']),\n 'best_server': None,\n 'color_num': GREY})\n prev_coord.append([item['x'], item['y']])\n\n # Create working road point data subset excluding unneeded fields\n rpoint = []\n point = 0\n for i in range(len(road_segment_point)):\n id = road_segment_point[i]['shapeid']\n rpoint.append({'id': i,\n 'shapeid': id,\n 'point': point,\n 'x': float(road_segment_point[i]['x']),\n 'y': float(road_segment_point[i]['y']),\n 'lowest_cost': float(\"inf\"),\n 'best_server': None,\n 'color_num': GREY})\n if i < len(road_segment_point) - 1:\n id_next = road_segment_point[i + 1]['shapeid']\n if id == id_next:\n point += 1\n else:\n point = 0\n\n # Convert adjacent road segment endpoints to lines (x1, y1) to (x2, y2)\n rseg = []\n line = 0\n seg = 0\n for i in range(len(rpoint[:-1])):\n id = rpoint[i]['shapeid']\n id_next = rpoint[i + 1]['shapeid']\n if id == id_next:\n rseg.append({'id': line,\n 'shapeid': id,\n 'segment': seg,\n 'point1': rpoint[i]['id'],\n 'x1': float(rpoint[i]['x']),\n 'y1': float(rpoint[i]['y']),\n 'point2': rpoint[i + 1]['id'],\n 'x2': float(rpoint[i + 1]['x']),\n 'y2': float(rpoint[i + 1]['y']),\n 'best_server': None,\n 'color_num': GREY})\n line += 1\n seg += 1\n else:\n seg = 0\n\n return snode, rnode, rpoint, rseg", "def merge_sidewalks(sidewalk_network1, 
sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def _create_nodes(\n self, lanelet: Lanelet, left_way_id: str, right_way_id: str\n ) -> Tuple[List[str], List[str]]:\n left_nodes, right_nodes = [], []\n start_index = 0\n end_index = len(lanelet.left_vertices)\n pot_first_left_node, pot_first_right_node = self._get_shared_first_nodes_from_other_lanelets(\n lanelet\n )\n pot_last_left_node, pot_last_right_node = self._get_shared_last_nodes_from_other_lanelets(\n lanelet\n )\n if pot_first_left_node:\n start_index = 1\n if pot_last_left_node:\n end_index = -1\n\n if left_way_id:\n first_left_node, last_left_node = self._get_first_and_last_nodes_from_way(\n left_way_id, lanelet.adj_left_same_direction\n )\n else:\n first_left_node = pot_first_left_node\n last_left_node = pot_last_left_node\n left_nodes = self._create_nodes_from_vertices(\n lanelet.left_vertices[start_index:end_index]\n )\n if right_way_id:\n first_right_node, last_right_node = self._get_first_and_last_nodes_from_way(\n right_way_id, lanelet.adj_right_same_direction\n )\n else:\n first_right_node = pot_first_right_node\n last_right_node = pot_last_right_node\n right_nodes = self._create_nodes_from_vertices(\n lanelet.right_vertices[start_index:end_index]\n )\n\n if first_left_node:\n left_nodes.insert(0, first_left_node)\n if first_right_node:\n right_nodes.insert(0, first_right_node)\n\n if last_left_node:\n left_nodes.append(last_left_node)\n if last_right_node:\n right_nodes.append(last_right_node)\n\n return left_nodes, right_nodes", "def construct_network(self):\n r = 0\n n = self.nbr_0_splxs\n for k in range(n):\n self.splxs.append((0, (0, k)))\n self.nbr_splxs += 1\n r, edge = self.find_next_edge(r)\n # this while loop finds the new edge to treat and add it to the 1-splx list and then finds out if a 2-splx is created\n while 
edge != (-1, -1):\n # Add the new edge\n self.one_splxs.append((edge, self.nbr_splxs))\n self.splxs.append((1, self.nbr_1_splxs))\n self.nbr_1_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n a, b = edge\n # find out if a 2-splx has been created\n for i in range(self.nbr_1_splxs - 1):\n c, d = self.one_splxs[i][0]\n if d == a:\n for j in range(i + 1, self.nbr_1_splxs - 1):\n e, f = self.one_splxs[j][0]\n if e == c and f == b:\n self.two_splxs.append((self.nbr_1_splxs - 1, i, j))\n self.splxs.append((2, self.nbr_2_splxs))\n self.nbr_2_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n # find the next edge to treat\n r, edge = self.find_next_edge(r)\n print(\"Network created\")\n return ()", "def multipleInBetweenLayerEdgesIntoNodeWithNoFixedPortOrderCauseCrossings(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer(graph)\n leftNodes = addNodesToLayer(2, leftLayer)\n rightLayer = makeLayer(graph)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n self.addInLayerEdge(rightNodes[0], rightNodes[2], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n return graph", "def build_2_node_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_edge(1, 2)\n\n return graph", "def create_osm_way(id, nodes, tags = {}): \n way = etree.Element('way', {'id': str(id), 'visible': 'true'})\n for node in nodes:\n way.append(etree.Element('nd', {'ref': str(node)}))\n for key, value in tags.items():\n way.append(etree.Element('tag', {'k': key, 'v': str(value)}))\n return way", "def get_shortest_route_two_nodes(start_node, end_node, graph_instance):\n route = shortest_path(\n graph_instance.ox_graph, start_node, end_node, weight=\"length\"\n )\n return route", "def multipleInBetweenLayerEdgesIntoNodeWithNoFixedPortOrder(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer(graph)\n leftNodes = addNodesToLayer(2, leftLayer)\n rightLayer = makeLayer(graph)\n rightNodes = addNodesToLayer(2, rightLayer)\n\n self.addInLayerEdge(rightNodes[0], rightNodes[1], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n return graph", "def parse_route_relation_waynodes(input, merge_highways):\r\n rels, ways, nodes = ParserOSM().parse(input, way_key=name_highway_ref_key, rel_key=network_ref_modifier_key)\r\n \r\n #\r\n # Collapse subrelations to surface ways.\r\n #\r\n \r\n changing = True\r\n\r\n while changing:\r\n changing = False\r\n \r\n for rel in rels.values():\r\n parts = rel['parts']\r\n\r\n for (index, part) in enumerate(parts):\r\n if part.startswith('rel:'):\r\n rel_id = part[4:]\r\n \r\n if rel_id in rels:\r\n # there's a matching subrelation, so pull all\r\n # its members up into this one looking for ways.\r\n\r\n parts[index:index+1] = rels[rel_id]['parts']\r\n del rels[rel_id]\r\n changing = True\r\n else:\r\n # no matching relation means drop it on the floor.\r\n parts[index:index+1] = []\r\n changing = True\r\n \r\n elif part.startswith('way:'):\r\n # good, we want these\r\n pass\r\n \r\n else:\r\n # not sure what this is, can't be good.\r\n parts[index:index+1] = []\r\n changing = True\r\n \r\n if changing:\r\n # rels was 
modified, try another round\r\n break\r\n \r\n #\r\n # Apply relation keys to ways.\r\n #\r\n \r\n rel_ways = dict()\r\n \r\n highways = dict(motorway=9, trunk=8, primary=7, secondary=6, tertiary=5)\r\n net_refs = dict()\r\n \r\n for rel in rels.values():\r\n for part in rel['parts']:\r\n # we know from above that they're all \"way:\".\r\n way_id = part[4:]\r\n \r\n # add the route relation key to the way\r\n rel_way = deepcopy(ways[way_id])\r\n way_name, way_hwy, way_ref = rel_way['key']\r\n rel_net, rel_ref, rel_mod = rel['key']\r\n \r\n if merge_highways == 'yes':\r\n rel_way['key'] = rel_net, rel_ref, rel_mod\r\n\r\n elif merge_highways == 'largest':\r\n rel_way['key'] = rel_net, rel_ref, rel_mod\r\n big_hwy = net_refs.get((rel_net, rel_ref), None)\r\n \r\n if big_hwy is None or (highways.get(way_hwy, 0) > highways.get(big_hwy, 0)):\r\n #\r\n # Either we've not yet seen this network/ref combination or\r\n # the current highway value is larger than the previously\r\n # seen largest one. Make a note of it for later.\r\n #\r\n net_refs[(rel_net, rel_ref, rel_mod)] = way_hwy\r\n\r\n else:\r\n rel_way['key'] = rel_net, rel_ref, rel_mod, way_hwy\r\n \r\n rel_ways[len(rel_ways)] = rel_way\r\n \r\n debug('%d rel_ways, %d nodes' % (len(rel_ways), len(nodes)))\r\n \r\n if merge_highways == 'largest':\r\n #\r\n # Run through the list again, assigning largest highway\r\n # values from net_refs dictionary to each way key.\r\n #\r\n for (key, rel_way) in rel_ways.items():\r\n network, ref, modifier = rel_way['key']\r\n highway = net_refs[(network, ref, modifier)]\r\n rel_ways[key]['key'] = network, ref, modifier, highway\r\n \r\n debug('%d rel_ways, %d nodes' % (len(rel_ways), len(nodes)))\r\n \r\n return rel_ways, nodes", "def __create_node(self, from_node_id, to_node_id):\n #ensure from_node_id and start_node_id is not the same\n if from_node_id == to_node_id:\n print(\"Cannot insert same node\")\n return\n \n # 1. declare two variable nodes\n n1 = n2 = None\n \n # 2. check if exist\n for x in self.__node:\n if x.getId()==from_node_id:\n n1 = x\n if x.getId()==to_node_id:\n n2 = x\n\n # 3. 
if n1 or n2 is None, create from_node_id / to_node_id\n if n1 is None:\n n1 = Node(from_node_id)\n self.__node.append(n1)\n \n if n2 is None:\n n2 = Node(to_node_id)\n self.__node.append(n2)\n\n #return from_node and to_node\n return n1, n2", "def prepare_gates(chip, source_gate, target_gate):\n crossroad = []\n travelled_path = []\n\n # Source and target always on z-axis 0\n source_coords = [chip.gates[source_gate][\"x\"], chip.gates[source_gate][\"y\"], 0]\n target_coords = [chip.gates[target_gate][\"x\"], chip.gates[target_gate][\"y\"], 0]\n\n chip = calculate_distance(target_coords, chip)\n\n start = chip.coordinates[0][source_coords[1]][source_coords[0]]\n start_node = nd.Node(source_coords, None, 1, start.cost + start.distance_to_goal)\n goal_node = nd.Node(target_coords, None, 1, 0)\n crossroad.append(start_node)\n\n return run_algorithm(target_coords, start_node, goal_node, chip, crossroad, travelled_path)", "def init_network(roads):\n #roads=(\n #(\"LB\", \"leuven\", \"brussel\", 27.0, 120),\n #(\"BL\", \"brussel\", \"leuven\", 30.0, 120),\n #(\"LA\", \"leuven\", \"antwerpen\", 61.0, 120),\n #(\"AL\", \"antwerpen\", \"leuven\", 63.0, 120),\n #(\"BO\", \"brussel\", \"oostende\", 110.0, 120),\n #(\"OA\", \"oostende\", \"antwerpen\", 120.0, 120),\n #(\"AH\", \"antwerpen\", \"hasselt\", 78.0, 120),\n #(\"HL\", \"hasselt\", \"leuven\", 60.0, 120))\n #0 1 2 3 4\n #road city1 city2 lenght speedlimit\n id_road = 0\n id_city1 = 1\n id_city2 = 2\n id_lenght = 3\n id_speedlimit = 4\n network = ({},{})\n for element in roads:\n road = element[id_road]\n city1 = element[id_city1]\n city2 = element[id_city2]\n lenght = element[id_lenght]\n speedlimit = element[id_speedlimit]\n time = element[id_lenght]/element[id_speedlimit]\n\t\n\t# On ajoute les informations de l'autoroute au dictionnaire\n network[0][road] = (city1,city2,lenght,speedlimit,time)\n \n\t\n\t# On ajoute la ville de départ comme clé dans le dictionnaire \n if city1 not in network[1]:\n network[1][city1] = ([],[])\n\t# On ajoute l'autoroute dans les 'autoroutes de départ' de la ville\n if road not in network[1][city1]:\n network[1][city1][0].append(road)\n \n\t# On ajoute la ville de fin comme clé dans le dictionnaire \n if city2 not in network[1]:\n network[1][city2] = ([],[])\n # On ajoute l'autoroute dans les 'autoroutes de fin' de la ville\n if road not in network[1][city2]:\n network[1][city2][1].append(road)\n \n return network", "def map_addr_tree_2(s, d, tors1, tors2):\n s_d = crc8(0, s, 0x31)%2\n if s_d == 1:\n tors1, tors2 = tors2, tors1\n n1 = len(tors1)\n n2 = len(tors2)\n #s_out, d_out = crc8(0, s, 0x31)%n1 + 1, crc8(0, d, 0x1d)%n2 + 1\n s_out, d_out = random.randint(0, n1-1) + 1, random.randint(0, n2-1) + 1\n s_out, d_out = tors1[s_out-1], tors2[d_out-1]\n return s_out, d_out", "def threeRoadConnect(data, x1, y1, x2, y2):\n temp_data = np.pad(data, (1, 1), 'constant', constant_values=0)\n # init\n points = [[x1, y1]]\n flagX = False\n flagY = False\n if not data[y1][x1] == data[y2][x2]:\n return False, []\n # Two lines parallel to the X-AXIS\n posX = 0\n for i in range(0, 18):\n if temp_data[y1 + 1][i] == 0 and temp_data[y2 + 1][i] == 0:\n if XRoadConnect(temp_data, i, y1 + 1, x1 + 1, y1 + 1) \\\n and XRoadConnect(temp_data, i, y2 + 1, x2 + 1, y2 + 1) \\\n and YRoadConnect(temp_data, i, y1 + 1, i, y2 + 1):\n flagX = True\n posX = i - 1\n if flagX:\n points.append([posX, y1])\n points.append([posX, y2])\n\n # Two lines parallel to the Y-AXIS\n posY = 0\n for i in range(0, 10):\n if temp_data[i][x1 + 1] == 0 and 
temp_data[i][x2 + 1] == 0:\n if YRoadConnect(temp_data, x1 + 1, i, x1 + 1, y1 + 1) \\\n and YRoadConnect(temp_data, x2 + 1, i, x2 + 1, y2 + 1) \\\n and XRoadConnect(temp_data, x1 + 1, i, x2 + 1, i):\n flagY = True\n posY = i - 1\n if flagY and flagX == False:\n points.append([x1, posY])\n points.append([x2, posY])\n\n if flagX or flagY:\n data[y1][x1] = data[y2][x2] = 0\n points.append([x2, y2])\n print(data)\n print(3)\n return flagX or flagY, points", "def get_2_step_neighbours(node):\n for i in range(len(node)):\n yield node[0:i] + (flip(node[i]),) + node[i+1:]\n\n for i, j in itertools.permutations(range(len(node)), 2):\n if i < j:\n yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]", "def parse_streetdir(self):\n \n first = self.words[self.index]['word']\n if self.index + 1 < self.length:\n second = self.words[self.index+1]['word']\n else:\n second = None\n \n if first in ['northwest', 'northeast', 'southwest', 'southeast']:\n return first, 1 \n elif first == 'nw':\n return \"northwest\", 1\n elif first == 'ne':\n return \"northeast\", 1\n elif first == 'sw':\n return \"southwest\", 1\n elif first == 'se':\n return \"southeast\", 1\n \n if first in ['n', 'north']:\n if second in ['w', 'west']:\n return \"northwest\", 2\n elif second in ['e', 'east']:\n return \"northeast\", 2\n else:\n return \"north\", 1\n elif first in ['s', 'south']:\n if second in ['w', 'west']:\n return \"southwest\", 2\n elif second in ['e', 'east']:\n return \"southeast\", 2\n else:\n return \"south\", 1\n elif first in ['e', 'east']:\n return \"east\", 1\n elif first in ['w', 'west']:\n return \"west\", 1\n \n return None,0", "def snap_connect_nodes(\n self,\n two_nodes,\n subsections=None,\n spacing=None,\n boundary_id=-1,\n ):\n two_nodes = np.asarray(two_nodes)\n if two_nodes.shape[0] != 2:\n raise ValueError(\"`two_nodes` takes two nodes only.\")\n\n # Abuse KDT since it is fast enough for this.\n kdt = KDTree(self.nodes_)\n _, nn_ind = kdt.query(two_nodes) # there are two indices\n logging.debug(\n \"Segment - Snapping and connecting [{ni}] nodes: {n}.\".format(\n ni=nn_ind,\n n=self.nodes[nn_ind]\n )\n )\n\n self.connect_nodes(\n nn_ind,\n reference_nodes=False,\n subsections=subsections,\n spacing=spacing,\n boundary_id=boundary_id,\n )", "def _poses_town02(self):\n def _simple_straight_forward():\n return [[64, 55], [54, 63], [51, 46], [45, 49], [29, 50], [61, 40]]\n\n def _simple_left_turn():\n return [[73, 62], [62, 7], [29, 43], [45, 79]]\n\n def _simple_right_turn():\n return [[78, 46], [44, 40], [18, 60], [60, 71]]\n\n return [_simple_straight_forward(),\n _simple_right_turn(),\n _simple_left_turn(),\n ]", "def build_auxiliary_structures(nodes_filename, ways_filename):\n nodes = {}\n for way in read_osm_data(ways_filename):\n highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)')\n if highway_type in ALLOWED_HIGHWAY_TYPES:\n nodes_along_way = way['nodes'] # List of nodes along this way\n for i in range(len(nodes_along_way) - 1):\n # A pair of adjacent nodes along this way\n left = nodes_along_way[i]\n right = nodes_along_way[i + 1]\n default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type]\n # If this way doesn't have a speed limit tag, we use the default value based on highway type\n speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit)\n\n def build_data(root, adjacent):\n \"\"\"\n root: ID of some node along way\n adjacent: ID of some node adjacent to root node along way\n \"\"\"\n new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # 
Init dict for node data structure\n root_data = nodes.get(root, new_node_data_struct)\n # There might be another way where root and adjacent are directly adjacent, so our\n # speed limit is the max of the speed limits of those two ways:\n root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit)\n nodes[root] = root_data # Add the data on root to our dictionary of node data\n\n build_data(left, right)\n if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes':\n # If this isn't a oneway way, we can build the data structure for the next node as well\n build_data(right, left)\n elif right == nodes_along_way[-1]:\n # In non-oneway ways, the above build_data(right, left) call creates the data structure\n # for the final node at the same time as the penultimate one. However, in the case of a\n # oneway path, we have to do it manually:\n nodes[right] = nodes.get(right, {'adjacent': {}})\n\n for node in read_osm_data(nodes_filename):\n id = node['id']\n if id in nodes:\n # If the id of this node in the generator was on a valid way, we add the data about that node\n # to its dictionary in nodes.\n # Add lat/lon data\n nodes[id]['lat'] = node['lat']\n nodes[id]['lon'] = node['lon']\n\n return nodes", "def create_nodes(self):", "def __init__(self, from_node, to_node, span=None):\n self.from_node = from_node\n self.to_node = to_node\n self.span = span\n self.dummyedges = []", "def create_nodes(nd=None):\n\n if not nd:\n raise ValueError(\"No nodes data provided.\")\n\n nodes = []\n\n # Create Bus objects from buses table\n busd = {}\n\n for i, b in nd[\"buses\"].iterrows():\n if b[\"active\"]:\n bus = solph.Bus(label=b[\"label\"])\n nodes.append(bus)\n\n busd[b[\"label\"]] = bus\n if b[\"excess\"]:\n nodes.append(\n solph.Sink(\n label=b[\"label\"] + \"_excess\",\n inputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"excess costs\"]\n )\n },\n )\n )\n if b[\"shortage\"]:\n nodes.append(\n solph.Source(\n label=b[\"label\"] + \"_shortage\",\n outputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"shortage costs\"]\n )\n },\n )\n )\n\n # Create Source objects from table 'commodity sources'\n for i, cs in nd[\"commodity_sources\"].iterrows():\n if cs[\"active\"]:\n nodes.append(\n solph.Source(\n label=cs[\"label\"],\n outputs={\n busd[cs[\"to\"]]: solph.Flow(\n variable_costs=cs[\"variable costs\"]\n )\n },\n )\n )\n\n # Create Source objects with fixed time series from 'renewables' table\n for i, re in nd[\"renewables\"].iterrows():\n if re[\"active\"]:\n # set static outflow values\n outflow_args = {\n \"nominal_value\": re[\"capacity\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == re[\"label\"]:\n outflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Source(\n label=re[\"label\"],\n outputs={\n busd[re[\"to\"]]: solph.Flow(**outflow_args)\n },\n )\n )\n\n # Create Sink objects with fixed time series from 'demand' table\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de['active']):\n # set static inflow values\n inflow_args = {\n \"nominal_value\": de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == de[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]: solph.Flow(**inflow_args)\n },\n 
)\n )\n\n # Create Transformer objects from 'transformers' table\n for i, t in nd[\"transformers\"].iterrows():\n if t[\"active\"]:\n # set static inflow values\n inflow_args = {\"variable_costs\": t[\"variable input costs\"]}\n # get time series for inflow of transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == t[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n # create\n nodes.append(\n solph.Transformer(\n label=t[\"label\"],\n inputs={busd[t[\"from\"]]: solph.Flow(**inflow_args)},\n outputs={\n busd[t[\"to\"]]: solph.Flow(nominal_value=t[\"capacity\"])\n },\n conversion_factors={busd[t[\"to\"]]: t[\"efficiency\"]},\n )\n )\n\n for i, s in nd[\"storages\"].iterrows():\n if s[\"active\"]:\n nodes.append(\n solph.components.GenericStorage(\n label=s[\"label\"],\n inputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity inflow\"],\n variable_costs=s[\"variable input costs\"],\n )\n },\n outputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity outflow\"],\n variable_costs=s[\"variable output costs\"],\n )\n },\n nominal_storage_capacity=s[\"nominal capacity\"],\n loss_rate=s[\"capacity loss\"],\n initial_storage_level=s[\"initial capacity\"],\n max_storage_level=s[\"capacity max\"],\n min_storage_level=s[\"capacity min\"],\n inflow_conversion_factor=s[\"efficiency inflow\"],\n outflow_conversion_factor=s[\"efficiency outflow\"],\n )\n )\n\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"]:\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label=\"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs={bus1: solph.Flow(), bus2: solph.Flow()},\n outputs={\n bus1: solph.Flow(nominal_value=p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1, bus2): p[\"efficiency\"],\n (bus2, bus1): p[\"efficiency\"],\n },\n )\n )\n\n return nodes", "def test_create_two_named_edges(self):\n n1, n2 = Node('a'), Node('b')\n result = n1 * 'foo' | 'bar' * n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, 'foo', 'bar')])\n self.assertEqual(n2.ein, [Edge(n1, n2, 'foo', 'bar')])" ]
[ "0.7492876", "0.70636004", "0.67605174", "0.6115718", "0.6109412", "0.6021206", "0.601323", "0.58551604", "0.5776858", "0.5567624", "0.54745114", "0.5371082", "0.53700775", "0.53357", "0.5334523", "0.53326386", "0.53183347", "0.53169686", "0.5308732", "0.52969956", "0.52640027", "0.52387714", "0.52333623", "0.52325684", "0.52310604", "0.51998115", "0.5186951", "0.51742256", "0.51320165", "0.51042163" ]
0.76479685
0
Sort nodes around the center_node in clockwise order
def sort_nodes(center_node, nodes): def cmp(n1, n2): angle1 = (math.degrees(center_node.angle_to(n1)) + 360.) % 360 angle2 = (math.degrees(center_node.angle_to(n2)) + 360.) % 360 if angle1 < angle2: return -1 elif angle1 == angle2: return 0 else: return 1 return sorted(nodes, cmp=cmp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle_sort_adjacent_nodes(self,n,ref_nbr=None):\n nbrs=self.node_to_nodes(n)\n if len(nbrs)==0:\n return []\n diffs=self.nodes['x'][nbrs] - self.nodes['x'][n]\n angles=np.arctan2(diffs[:,1],diffs[:,0])\n nbrs=nbrs[np.argsort(angles)]\n if ref_nbr is not None: \n i=list(nbrs).index(ref_nbr)\n nbrs=np.roll(nbrs,-i)\n return nbrs", "def sort_nodes(self):\n nodes = self._chain.root_node.ordered_subnodes_hierarchy()\n self._chain.nodes = nodes", "def sort_elements(self, elements, center):\n all_interface_neighbors = self.mesh_entities.bridge_adjacencies(\n elements, self.interface_dim, self.target_dim)\n num_neighbors = np.array(\n [np.intersect1d(elements, ns).shape[0]\n for ns in all_interface_neighbors])\n\n visited_entities = [elements[num_neighbors.argmin()]]\n\n elements_set = set(elements)\n num_elements = elements.shape[0]\n while len(visited_entities) < num_elements:\n curr_entity = visited_entities[-1]\n interface_neighbors = self.mesh_entities.bridge_adjacencies(\n curr_entity, self.interface_dim, self.target_dim)\n unvisited_entities = elements_set - set(visited_entities)\n next_entities = unvisited_entities & set(interface_neighbors)\n visited_entities.append(next_entities.pop())\n\n ordered_elements = np.array(visited_entities)\n\n elements_centers = self.mesh_entities.center[ordered_elements][:, 0:2]\n if num_elements > 2:\n # Check the orientation of the elements.\n A, B, C = elements_centers[0], elements_centers[1], elements_centers[2]\n\n # If clockwise, reverse so it is counterclockwise.\n if (B[0] - A[0])*(C[1] - A[1]) - (C[0] - A[0])*(B[1] - A[1]) < 0:\n ordered_elements = np.flip(ordered_elements)\n elif num_elements == 2:\n A, B = elements_centers[0], elements_centers[1]\n if np.cross(center[0, 0:2] - A, center[0, 0:2] - B) < 0:\n ordered_elements = np.flip(ordered_elements)\n\n return ordered_elements", "def sort_nodes(self):\n non_terminal_nodes = []\n for node in self.nodes:\n if not node.entries:\n assert self.start is None, (\n 'there are more than one node with no incoming arcs')\n self.start = node\n elif not node.exits:\n assert self.end is None, (\n 'there are more than one node with no outgoing arcs')\n self.end = node\n else:\n non_terminal_nodes.append(node)\n assert self.start is not None and self.end is not None, (\n 'no start or end node')\n self.nodes = ([self.start]\n + sorted(non_terminal_nodes,\n key=lambda x: (x.entry, x.sym))\n + [self.end])\n for n in self.nodes:\n n.exits.sort(key=lambda x: (x.dest.entry, x.dest.sym))", "def sorted_nodes_list(self):\r\n full_sorted_node_list = map(lambda k: k[0], sorted(self.graph.degree(),\r\n key=lambda k: k[1], reverse=True))\r\n return full_sorted_node_list", "def sorted_nodes(self):\n if self._sorted_nodes is None:\n self.sorting()\n return self._sorted_nodes", "def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True", "def chopnod_sort(self, table):\n if not 
isinstance(table, Table):\n return\n elif None in [self.chopdist, self.noddist]:\n return\n elif 'xcentroid' not in table.columns or \\\n 'ycentroid' not in table.columns:\n return\n dist = np.sqrt((self.chopdist ** 2) + (self.noddist ** 2))\n x0, y0 = table['xcentroid'], table['ycentroid']\n valid = [False] * len(table)\n for idx, row in enumerate(table):\n dx = x0 - row['xcentroid']\n dy = y0 - row['ycentroid']\n dr = np.sqrt((dx ** 2) + (dy ** 2))\n dchop = abs(dr - self.chopdist)\n dnod = abs(dr - self.noddist)\n dchopnod = abs(dr - dist)\n ok = (np.array([dchop, dnod, dchopnod]) < self.epsilon)\n if ok.astype(int).sum() >= 2:\n valid[idx] = True\n table = table[valid]", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def objs_sort_by_center(objs, target=0):\n sorted = []\n centers = []\n for i in objs:\n if target == 0:\n centers.append((i['bbox'][0] + i['bbox'][2]) / 2.0)\n elif target == 1:\n centers.append((i['bbox'][1] + i['bbox'][3]) / 2.0)\n centers_idx = np.argsort(np.asarray(centers))\n\n for i in centers_idx:\n sorted.append(objs[i])\n \n return sorted", "def sort_linkage(Z, node_index, node_values):\n\n N = Z.shape[0] + 1 # number of leaves\n\n if node_index < 0:\n return\n\n left_child = int(Z[node_index, 0] - N)\n right_child = int(Z[node_index, 1] - N)\n\n swap = False\n\n if left_child < 0 and right_child < 0:\n swap = False\n elif left_child < 0 and right_child >= 0:\n swap = True\n elif left_child >= 0 and right_child < 0:\n swap = False\n else:\n if node_values[left_child] > node_values[right_child]:\n swap = True\n else:\n swap = False\n\n if swap:\n Z[node_index, 0] = right_child + N\n Z[node_index, 1] = left_child + N\n\n sort_linkage(Z, left_child, node_values)\n sort_linkage(Z, right_child, node_values)", "def sort_clockwise(a):\n\n # get centroids, shape=(1,2=(cx,cy))\n center = a.mean(axis=0).reshape((1, 2))\n\n sorted_inds = np.argsort(np.arctan2(a[:, 1]-center[:, 1], a[:, 0]-center[:, 0]))\n\n return np.take(a, sorted_inds, axis=0)", "def _sort_nodes_by_height(self):\n self.node_high_to_low = np.argsort(self.height)[::-1]\n\n # Also to sort neighbour node array by height\n\n neighbour_array_lo_hi = self.neighbour_array.copy() # easiest way to get size / structure right\n\n for node in range(0,self.tri.npoints):\n heights = self.height[self.neighbour_array[node]]\n neighbour_array_lo_hi[node] = self.neighbour_array[node][np.argsort(heights)]\n \n self.neighbour_array_lo_hi = neighbour_array_lo_hi", "def sortAssemsByRing(self):\n sortKey = lambda a: a.spatialLocator.getRingPos()\n self._children = sorted(self._children, key=sortKey)", "def sort_edges_clockwise(edges):\n median_reference = ft.reduce(operator.add, map(calc_edge_median, edges)) / len(\n edges\n )\n\n def sort_function(edge):\n vector_difference = median_reference - calc_edge_median(edge)\n return math.atan2(vector_difference.y, vector_difference.x)\n\n return sorted(edges, key=sort_function, reverse=True)", "def getAndSortFiducialPoints(self, center):\r\n # self.__registrationStatus.setText('Registration processing...')\r\n # pNode = self.parameterNode()\r\n # fixedAnnotationList = slicer.mrmlScene.GetNodeByID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if fixedAnnotationList != None:\r\n # fixedAnnotationList.RemoveAllChildrenNodes()\r\n markerCenters = center\r\n nbCenter = len(center)\r\n for k in range(nbCenter):\r\n point = [0]\r\n for i in range(nbCenter):\r\n U,V,W = 0,0,0\r\n for j in range(nbCenter):\r\n d = 
0\r\n if i != j and markerCenters[i]!=(0,0,0):\r\n d2 = (markerCenters[i][0]-markerCenters[j][0])**2+(markerCenters[i][1]-markerCenters[j][1])**2+(markerCenters[i][2]-markerCenters[j][2])**2\r\n d = d2**0.5\r\n # print markerCenters[i],markerCenters[j]\r\n #print d\r\n if d >=45 and d<=53:\r\n U += 1\r\n elif d >53 and d<60:\r\n V +=1\r\n elif d >=70 and d<80:\r\n W +=1\r\n #print U,V,W\r\n if U+V+W>=3:\r\n #print markerCenters[i]\r\n point.extend([i])\r\n point.remove(0)\r\n minX = [999,999,999,999]\r\n maxX = [-999,-999,-999,-999]\r\n sorted = [[0,0,0] for l in range(4)]\r\n sortedConverted = [[0,0,0] for l in range(4)]\r\n for i in range(2):\r\n for k in point:\r\n if markerCenters[k][0]<= minX[0]:\r\n minX[0] = markerCenters[k][0]\r\n minX[1] = k\r\n elif markerCenters[k][0]<= minX[2]:\r\n minX[2] = markerCenters[k][0]\r\n minX[3] = k\r\n if markerCenters[k][0]>= maxX[0]:\r\n maxX[0] = markerCenters[k][0]\r\n maxX[1] = k\r\n elif markerCenters[k][0]>= maxX[2]:\r\n maxX[2] = markerCenters[k][0]\r\n maxX[3] = k\r\n if markerCenters[minX[1]][1] < markerCenters[minX[3]][1]:\r\n sorted[0] = minX[1]\r\n sorted[1] = minX[3]\r\n else:\r\n sorted[0] = minX[3]\r\n sorted[1] = minX[1]\r\n if markerCenters[maxX[1]][1]>markerCenters[maxX[3]][1]:\r\n sorted[2] = maxX[1]\r\n sorted[3] = maxX[3]\r\n else:\r\n sorted[2] = maxX[3]\r\n sorted[3] = maxX[1]\r\n sorted2 = [0,0,0,0]\r\n if 1:#self.horizontalTemplate.isChecked():\r\n sorted2[0]=sorted[2]\r\n sorted2[2]=sorted[0]\r\n sorted2[1]=sorted[3]\r\n sorted2[3]=sorted[1]\r\n else:\r\n sorted2[0]=sorted[3]\r\n sorted2[2]=sorted[1]\r\n sorted2[1]=sorted[0]\r\n sorted2[3]=sorted[2]\r\n # logic = slicer.modules.annotations.logic()\r\n # logic.SetActiveHierarchyNodeID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if pNode.GetParameter(\"Template\")=='4points':\r\n # nbPoints=4\r\n # elif pNode.GetParameter(\"Template\")=='3pointsCorners':\r\n # nbPoints=3\r\n l = slicer.modules.annotations.logic()\r\n l.SetActiveHierarchyNodeID(slicer.util.getNode('Fiducial List_fixed').GetID())\r\n for k in range(4) :\r\n fiducial = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n fiducial.SetReferenceCount(fiducial.GetReferenceCount()-1)\r\n fiducial.SetFiducialCoordinates(markerCenters[sorted2[k]])\r\n fiducial.SetName(str(k))\r\n fiducial.Initialize(slicer.mrmlScene)\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed ==None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n # sRed.SetSliceVisible(1)\r\n m= sRed.GetSliceToRAS()\r\n m.SetElement(0,3,sortedConverted[3][0])\r\n m.SetElement(1,3,sortedConverted[3][1])\r\n m.SetElement(2,3,sortedConverted[3][2])\r\n sRed.Modified()\r\n return sorted2", "def sort_clockwise(coordinates):\n center = tuple(map(op.truediv, reduce(lambda x_, y_: map(op.add, x_, y_), coordinates), [len(coordinates)] * 2))\n coordinates = sorted(coordinates, key=lambda coord: (-135 - np.degrees(\n np.arctan2(*tuple(map(op.sub, center, coord))[::-1]))) % 360)\n return coordinates", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] 
== \"white\":\n visit(node)\n return order", "def sort_apply_nodes(inputs, outputs, cmps):\r\n\r\n return posort(list_of_nodes(inputs, outputs), *cmps)", "def sorted_clusters(self):\n return (c for _, c in sorted((-c.size(), c) for c in self.clusters))", "def sorting(self, presorted=None):\n self._sorted_nodes = []\n if presorted:\n notsorted_nodes = copy(presorted)\n else:\n notsorted_nodes = copy(self.nodes)\n predecessors = {key: copy(val) for (key, val) in self.predecessors.items()}\n\n # nodes that depends only on the self._nodes_wip should go first\n # soe remove them from the connections\n for nd_out in self._node_wip:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)\n\n while notsorted_nodes:\n sorted_part, notsorted_nodes = self._sorting(notsorted_nodes, predecessors)\n self._sorted_nodes += sorted_part\n for nd_out in sorted_part:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)", "def compute_node_positions(self):\n pass", "def arrange_nodes_circular(self, radius=120):\n\n self.get()\n if self.status != \"opened\":\n self.open() # pragma: no cover\n\n _angle = (2 * pi) / len(self.nodes)\n # The Y Axis is inverted in GNS3, so the -Y is UP\n for index, n in enumerate(self.nodes):\n _x = int(radius * (sin(_angle * index)))\n _y = int(radius * (-cos(_angle * index)))\n n.update(x=_x, y=_y)", "def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n remaining_nodes.append(nd)\n return sorted_part, remaining_nodes", "def _reorder_nodes(orient, nodes, flip_matrix, unflip=False):\n # reorder nodes (Code adapted from\n # meshmode.mesh.processing.flip_simplex_element_group)\n\n # ( round to int bc applying on integers)\n flip_mat = np.rint(flip_matrix)\n if unflip:\n flip_mat = flip_mat.T\n\n # flipping twice should be identity\n assert la.norm(\n np.dot(flip_mat, flip_mat)\n - np.eye(len(flip_mat))) < 1e-13\n\n # flip nodes that need to be flipped\n flipped_nodes = np.copy(nodes)\n flipped_nodes[orient < 0] = np.einsum(\n \"ij,ej->ei\",\n flip_mat, nodes[orient < 0])\n\n return flipped_nodes", "def topological_sort(self):\n \n visited = set()\n sorted_node = [] \n\n # sort all the node in the graph\n for i in self.node_set: \n if i not in visited: \n visited = self.topological_sort_helper(i, visited, sorted_node) \n \n visited.clear()\n return sorted_node", "def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')", "def depth_node_ordering(start_node, end_nodes):\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n 
ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list", "def sort(self):\n while self.nodes != []:\n iterated = False\n for node in self.leaf_nodes():\n iterated = True\n self.prune_node(node)\n yield node\n if not iterated:\n raise CyclicGraphError(\"Sorting has found a cyclic graph.\")", "def _clustered_ordering(distances):\n clusterer = AgglomerativeClustering(n_clusters=len(distances), affinity='precomputed', linkage='average')\n clusterer.fit(distances)\n\n def walk_tree(children):\n ii = itertools.count(len(distances))\n children_dict = {next(ii): [x[0], x[1]] for x in children}\n ii = itertools.count(len(distances))\n children_list = [(next(ii), x[0], x[1]) for x in children]\n stack = [children_list[-1][0]]\n\n ordering = []\n while stack:\n last = stack.pop()\n if last in children_dict:\n l, r = children_dict[last]\n if l not in children_dict:\n ordering.append(l)\n else:\n stack.append(l)\n if r not in children_dict:\n ordering.append(r)\n else:\n stack.append(r) \n else:\n ordering.append(last)\n return ordering\n\n return walk_tree(clusterer.children_)" ]
[ "0.6512776", "0.6447932", "0.6426049", "0.6336005", "0.62776023", "0.62600493", "0.62230253", "0.620947", "0.619001", "0.6097722", "0.6039976", "0.6013449", "0.59956414", "0.5990902", "0.5963925", "0.5945009", "0.5903254", "0.5896407", "0.5880149", "0.5859731", "0.5853133", "0.5835773", "0.5804914", "0.578939", "0.5754291", "0.5749177", "0.57237244", "0.5709633", "0.5693866", "0.5652492" ]
0.84855396
0
Make a crosswalk node from three nodes. The first is a pivot node, and the other two are nodes connected to the pivot node. The new node is created between the two connected nodes.
def make_crosswalk_node(node, n1, n2): v_curr = node.vector() v1 = node.vector_to(n1, normalize=True) v2 = node.vector_to(n2, normalize=True) v = v1 + v2 v /= np.linalg.norm(v) # Normalize the vector v_new = v_curr + v * 0.00011 # v_new = v_curr + np.array(latlng_offset(v_curr[0], vector=v, distance=7)) return Node(None, v_new[0], v_new[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_crosswalk_nodes(intersection_node, adj_street_nodes):\n if len(adj_street_nodes) < 4:\n raise ValueError(\"You need to pass 4 or more nodes for adj_street_nodes \")\n\n crosswalk_nodes = []\n for i in range(len(adj_street_nodes)):\n n1 = adj_street_nodes[i - 1]\n n2 = adj_street_nodes[i]\n crosswalk_node = make_crosswalk_node(intersection_node, n1, n2)\n\n # Keep track of from which streets the crosswalk nodes are created.\n way_ids = []\n for wid in n1.get_way_ids():\n way_ids.append(wid)\n for wid in n2.get_way_ids():\n way_ids.append(wid)\n way_ids = intersection_node.get_shared_way_ids(way_ids)\n\n crosswalk_node.way_ids = way_ids\n crosswalk_nodes.append(crosswalk_node)\n crosswalk_node.parents = (intersection_node, n1, n2)\n\n return crosswalk_nodes", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def __create_node(self, from_node_id, to_node_id):\n #ensure from_node_id and start_node_id is not the same\n if from_node_id == to_node_id:\n print(\"Cannot insert same node\")\n return\n \n # 1. declare two variable nodes\n n1 = n2 = None\n \n # 2. check if exist\n for x in self.__node:\n if x.getId()==from_node_id:\n n1 = x\n if x.getId()==to_node_id:\n n2 = x\n\n # 3. 
if n1 or n2 is None, create from_node_id / to_node_id\n if n1 is None:\n n1 = Node(from_node_id)\n self.__node.append(n1)\n \n if n2 is None:\n n2 = Node(to_node_id)\n self.__node.append(n2)\n\n #return from_node and to_node\n return n1, n2", "def pivot(self):\n\n # Pick random pivot node\n pivot = np.random.randint(self.n_v)\n\n # Get list of neighbors\n neighbors = np.asarray(list(nx.all_neighbors(self.G, pivot)))\n\n # Return if no neighbors are available\n if len(neighbors) == 0:\n return pivot, pivot, pivot\n\n # Collect all values\n values = np.asarray(list(nx.get_node_attributes(self.G, 'value').values()))\n\n # Save pivot value and set to NaN\n pivot_val = values[pivot]\n values[pivot] = np.nan\n\n # Find candidate\n candidate = np.nanargmin(np.abs(values - pivot_val))\n\n # Compute outcast\n neighbors_values = values[neighbors]\n outcast = neighbors[np.argmax(np.abs(neighbors_values - pivot_val))]\n\n # Return pivot and candidate\n return pivot, candidate, outcast", "def connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids):\n # crosswalk_node_ids = crosswalk.get_node_ids()[:-1] # Crosswalk has a redundant node at the end.\n\n for crosswalk_node_id in crosswalk_node_ids[:-1]:\n try:\n # Get the intersection node and two nodes that created the intersection sidewalk node\n crosswalk_node = sidewalk_network.nodes.get(crosswalk_node_id)\n intersection_node, adjacent_street_node1, adjacent_street_node2 = crosswalk_node.parents\n\n # Connect sidewalk nodes created from adjacent_street_node1 and adjacent_street_node2\n # Get sidewalk nodes that are created from the street node, and\n # identify which one should be connected to crosswalk_node\n for adjacent_street_node in [adjacent_street_node1, adjacent_street_node2]:\n # Skip the dummy node\n if len(adjacent_street_node.get_way_ids()) == 0:\n continue\n\n # Create a vector from the intersection node to the adjacent street node.\n # Then also create a vector from the intersection node to the sidewalk node\n v_adjacent_street_node = intersection_node.vector_to(adjacent_street_node, normalize=True)\n shared_street_id = intersection_node.get_shared_way_ids(adjacent_street_node)[0]\n try:\n sidewalk_node_1_from_intersection, sidewalk_node_2_from_intersection = intersection_node.get_sidewalk_nodes(shared_street_id)\n except TypeError:\n # Todo: Issue #29. 
Sometimes shared_street_id does not exist in the intersection_node.\n log.exception(\"connect_crosswalk_nodes(): shared_street_id %s does not exist.\" % shared_street_id)\n continue\n v_sidewalk_node_1_from_intersection = intersection_node.vector_to(sidewalk_node_1_from_intersection, normalize=True)\n\n # Check which one of sidewalk_node_1_from_intersection and sidewalk_node_2_from_intersection are\n # on the same side of the road with crosswalk_node.\n # If the sign of the cross product from v_adjacent_street_node to v_crosswalk_node is same as\n # that of v_adjacent_street_node to v_sidewalk_node_1_from_intersection, then\n # sidewalk_node_1_from_intersection should be on the same side.\n # Otherwise, sidewalk_node_2_from_intersection should be on the same side with crosswalk_node.\n v_crosswalk_node = intersection_node.vector_to(crosswalk_node, normalize=True)\n if np.cross(v_adjacent_street_node, v_crosswalk_node) * np.cross(v_adjacent_street_node, v_sidewalk_node_1_from_intersection) > 0:\n node_to_swap = sidewalk_node_1_from_intersection\n else:\n node_to_swap = sidewalk_node_2_from_intersection\n\n sidewalk_network.swap_nodes(node_to_swap, crosswalk_node)\n except ValueError:\n log.exception(\"Error while connecting crosswalk nodes, so skipping...\")\n continue\n return", "def create_nodes(self):", "def add_leaves_and_links(nodes, links, nleaves, nlabel, xlabel, dist_1, dist_2, lcnt):\n tmp = []\n # Mapping unmatched leaves with minimum weight matching of distance matrices.\n ndict1 = map_nodes_leaves(dist_2[lcnt:len(dist_2)], dist_1[lcnt:len(dist_1)])\n ndict1 = ndict1+lcnt\n nndict1 = np.zeros(len(dist_1)).astype(int)-1\n # The indexes of matched leaves do not need to be changed.\n for i in range(0, lcnt):\n nndict1[i] = i\n # Record resorting rule of unmatch leaves of input tree.\n for i in range(0,len(ndict1)):\n nndict1[ndict1[i]] = lcnt+i\n\n # Because the size of leaves in pivot tree is larger than that of input tree. There are some leaves in pivot tree are unmatched. 
We should create dummy node to input tree.\n for i in range(0, len(nndict1)):\n if nndict1[i]==-1:\n # Still use minimum weight matching of distance matrices to find the position of dummy node.\n nndict1[i] = map_nodes_leaves([dist_1[i]], dist_2)\n dict1 = nndict1\n\n # Add dummy nodes and links.\n ntmp = len(nodes)\n nodes = nodes.tolist()\n for i in range(0, len(dict1)):\n if dict1[i] not in tmp:\n tmp.append(dict1[i])\n else:\n tmp.append(ntmp)\n nleaves.append(ntmp)\n nlabel.append(xlabel[i])\n new_node = nodes[dict1[i]][:]\n new_node[1] = 0\n nodes.append(new_node)\n links = add_links(links, dict1[i], ntmp)\n ntmp = ntmp + 1\n nodes = np.array(nodes)\n return nodes, links, nleaves, nlabel", "def create_nodes(self, nodes: List[Node]):\n nodes_str = \",\\n\".join([str(n) for n in nodes])\n query = \"\"\"CREATE %s\"\"\" % nodes_str\n return self.create_tx(query)", "def transform(nodes, weights, new_corners):\n if nodes.shape[1] == 1:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n M = np.zeros((1, 1))\n M[:, 0] = 0.5 * (x_1 - x_0)\n origin = np.array([-1.0])\n elif nodes.shape[1] == 2:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n M = np.zeros((2, 2))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n origin = np.array([-1.0, -1.0])\n elif nodes.shape[1] == 3:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n x_3 = new_corners[3, :]\n M = np.zeros((3, 3))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n M[:, 2] = 0.5 * (x_3 - x_0)\n origin = np.array([-1.0, -1.0, -1.0])\n\n offset = -M @ origin + x_0\n volume_fraction = np.abs(np.linalg.det(M))\n return np.add(nodes @ M.T, offset), volume_fraction * weights", "def _create_future_pivot(self):\n if self._isSetup:\n return\n\n # create a locator and move it to the current pivot\n # parent the locator under the object\n\n locator_name = self._object.name() + \"_futurePivotLocator#\"\n\n self._futurePivot = auxiliary.get_valid_dag_node(\n pm.spaceLocator(n=locator_name)\n )\n\n pm.parent(self._futurePivot, self._object)\n\n current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True)\n\n pm.xform(self._futurePivot, ws=True, t=current_pivot_pos[0:3])\n\n # change the color\n self._futurePivot.setAttr(\"overrideEnabled\", 1)\n self._futurePivot.setAttr(\"overrideColor\", 13)\n\n # set translate and visibility to non-keyable\n self._futurePivot.setAttr(\"tx\", k=False, channelBox=True)\n self._futurePivot.setAttr(\"ty\", k=False, channelBox=True)\n self._futurePivot.setAttr(\"tz\", k=False, channelBox=True)\n\n self._futurePivot.setAttr(\"v\", k=False, channelBox=True)\n\n # lock scale and rotate\n self._futurePivot.setAttr(\"rx\", lock=True, k=False, channelBox=False)\n self._futurePivot.setAttr(\"ry\", lock=True, k=False, channelBox=False)\n self._futurePivot.setAttr(\"rz\", lock=True, k=False, channelBox=False)\n\n self._futurePivot.setAttr(\"sx\", lock=True, k=False, channelBox=False)\n self._futurePivot.setAttr(\"sy\", lock=True, k=False, channelBox=False)\n self._futurePivot.setAttr(\"sz\", lock=True, k=False, channelBox=False)\n\n # hide it\n self._futurePivot.setAttr(\"v\", 0)", "def pivots_setup(self, mControl = None,\n mRigNull = None,\n mBallJoint = None,\n mBallWiggleJoint = None,\n mToeJoint = None,\n jointOrientation = 'zyx',\n pivotResult = None,\n mDag = None,\n rollSetup = 'default',\n l_pivotOrder = l_pivotOrder,\n setup = 'default',\n setupWobble = False,\n setupSpin = True,\n **kws):\n _short = self.mNode\n _str_func = 
'pivots_setup'\n log.debug(cgmGEN.logString_start(_str_func))\n\n \n _side = get_side(self)\n if _side in ['right']:\n d_bankNames = d_pivotBankNames['right']\n else:\n d_bankNames = d_pivotBankNames['default']\n \n if mDag == None:\n mDag = mControl\n\n d_strCaps = {'front':kws.get('front','toe').capitalize(),\n 'back':kws.get('back','heel').capitalize(),\n 'left':kws.get('left','outer').capitalize(),\n 'right':kws.get('right','inner').capitalize(),\n 'center':kws.get('center','ball').capitalize()}\n\n \n if mRigNull is None:\n mRigNull = self.moduleTarget.rigNull\n \n if not mControl:\n mControl = mRigNull.handle\n else:\n mControl = cgmMeta.validateObjArg(mControl,'cgmObject')\n \n mPivotResult = cgmMeta.validateObjArg(pivotResult,'cgmObject',noneValid=True)\n \n #Find our pivots and create grups ========================================================================\n d_pivots = {}\n #d_twistGroups = {}\n d_drivenGroups = {}\n mLastParent = mDag\n for a in l_pivotOrder:\n str_a = 'pivot' + a.capitalize()\n mPivot = mRigNull.getMessage(str_a,asMeta=True)\n \n #remap for right side\n if _side == 'right':\n if a == 'left':\n a = 'right'\n elif a == 'right':\n a = 'left'\n \n if mPivot:\n log.debug(\"|{0}| >> Found: {1}\".format(_str_func,str_a))\n if mPivot[0].getMessage('masterGroup'):\n mPivot = mPivot[0]\n d_pivots[a] = mPivot\n \n if a in ['left','right']:\n mPivot.doStore('cgmName', d_bankNames[a])\n \n _str = NAMETOOLS.get_combinedNameDict(mControl.mNode,['cgmType','cgmTypeModifier'])\n mPivot.doStore('cgmNameModifier',_str)\n mPivot.doName()\n \n mPivot.rotateOrder = 2\n mPivot.masterGroup.parent = mLastParent\n mDrivenGroup = mPivot.doGroup(False, False, asMeta=True)\n mDrivenGroup.addAttr('cgmType','pivotDriver')\n \n mPivot.connectChildNode(mDrivenGroup,'drivenGroup','handle')#Connect \n \n mDrivenGroup.doName()\n d_drivenGroups[a] = mDrivenGroup\n mDrivenGroup.parent = mPivot\n \n mLastParent = mDrivenGroup\n continue\n log.error(\"|{0}| >> No master group on pivot. Wrong stage: {1}\".format(_str_func,str_a))\n \n #if mDag == None:\n #mDag.p_parent = mLastParent\n #mLastParent = mDag\n \n if mBallWiggleJoint:\n mBallWiggleJoint.parent = mLastParent\n mZeroGroup = mBallWiggleJoint.doGroup(True, True, asMeta=True,typeModifier = 'zero')\n mBallWiggleJoint.jointOrient = 0,0,0\n mBallWiggleJoint.rotate = 0,0,0\n \n #mPivot.connectChildNode(mDrivenGroup,'drivenGroup','handle')#Connect \n \n if mBallJoint:\n mBallJoint.parent = mLastParent\n mZeroGroup = mBallJoint.doGroup(True, True, asMeta=True,typeModifier = 'zero')\n mBallJoint.jointOrient = 0,0,0\n mBallJoint.rotate = 0,0,0\n \n mLastParent = mBallJoint\n \n \n if not d_pivots:\n raise ValueError,\"|{0}| >> No pivots found. 
mBlock: {1}\".format(_str_func,self)\n \n #pprint.pprint(vars())\n \n \n \n #Logic ======================================================================================\n if setup == 'default':\n if d_drivenGroups.get('center'):\n b_centerOK = True\n else:\n log.error(\"|{0}| >> Missing center pivot setup info...\".format(_str_func)) \n b_centerOK = False\n \n if d_drivenGroups.get('front') and d_drivenGroups.get('back'):\n b_rollOK = True\n else:\n log.error(\"|{0}| >> Missing roll setup...\".format(_str_func)) \n b_rollOK = False\n \n if d_drivenGroups.get('left') and d_drivenGroups.get('right'):\n b_bankOK = True\n else:\n log.error(\"|{0}| >> Missing bank setup...\".format(_str_func)) \n b_bankOK = False\n\n #Attributes ...---------------------------------------------------------------------------------------------\n log.debug(\"|{0}| >> Attributes ...\".format(_str_func)) \n \n #Roll ===================================================================================================\n if b_rollOK:\n mPlug_roll = cgmMeta.cgmAttr(mControl,'roll',attrType='float',defaultValue = 0,keyable = True)\n \n \n if rollSetup in ['human','ik','foot']:\n log.debug(\"|{0}| >> foot setup ...\".format(_str_func))\n mFrontToe = d_drivenGroups['front']\n mHeel = d_drivenGroups['back']\n \n mPlug_toeLift = cgmMeta.cgmAttr(mControl,'rollBallLift_set',attrType='float',initialValue = 35, defaultValue = 35,keyable = True)\n mPlug_toeStaighten = cgmMeta.cgmAttr(mControl,'rollBallStraight_set',attrType='float',initialValue = 65,defaultValue = 65,keyable = True)\n \n if mBallWiggleJoint:\n mPlug_ballUpDn = cgmMeta.cgmAttr(mControl,'ballLift',attrType='float',defaultValue = 0,keyable = True)\n mPlug_ballTwist= cgmMeta.cgmAttr(mControl,'ballTwist',attrType='float',defaultValue = 0,keyable = True) \n mPlug_ballWiggle= cgmMeta.cgmAttr(mControl,'ballSide',attrType='float',defaultValue = 0,keyable = True) \n \n mPlug_ballUpDn.doConnectOut(\"%s.r%s\"%(mBallWiggleJoint.mNode,jointOrientation[2].lower()))\n \n if _side in ['right']:\n str_arg = \"{0}.r{1} = -{2}\".format(mBallWiggleJoint.mNode,\n jointOrientation[0].lower(),\n mPlug_ballTwist.p_combinedShortName)\n log.debug(\"|{0}| >> Ball wiggle Right arg: {1}\".format(_str_func,str_arg)) \n NODEFACTORY.argsToNodes(str_arg).doBuild()\n \n str_arg = \"{0}.r{1} = -{2}\".format(mBallWiggleJoint.mNode,\n jointOrientation[1].lower(),\n mPlug_ballWiggle.p_combinedShortName)\n log.debug(\"|{0}| >> Ball wiggle Right arg: {1}\".format(_str_func,str_arg)) \n NODEFACTORY.argsToNodes(str_arg).doBuild() \n else:\n mPlug_ballTwist.doConnectOut(\"%s.r%s\"%(mBallWiggleJoint.mNode,jointOrientation[0].lower()))\n mPlug_ballWiggle.doConnectOut(\"%s.r%s\"%(mBallWiggleJoint.mNode,jointOrientation[1].lower()))\n \n \n #Heel setup ----------------------------------------------------------------------------------------\n log.debug(\"|{0}| >> Heel ...\".format(_str_func)) \n mPlug_heelClampResult = cgmMeta.cgmAttr(mControl,'result_clamp_heel',attrType='float',keyable = False,hidden=True)\n \n #Setup the heel roll\n #Clamp\n \n _arg = \"{0} = clamp({1},0,{2})\".format(mPlug_heelClampResult.p_combinedShortName,\n mPlug_roll.p_combinedShortName,\n mPlug_roll.p_combinedShortName)\n \n log.debug(\"|{0}| >> heel arg: {1}\".format(_str_func,_arg)) \n NODEFACTORY.argsToNodes(_arg).doBuild()\n \n #Inversion\n mPlug_heelClampResult.doConnectOut(\"%s.rx\"%(mHeel.mNode)) \n \n #Ball setup ----------------------------------------------------------------------------------------------\n \"\"\"\n 
Schleifer's\n ball_loc.rx = (linstep(0,$toeLift, $roll) * (1-(linstep( $toeLift, $toeStraight, $roll))) * $roll;\n ballToeLiftRoll md ( pma toeToeStraightRoll md \n 1 4 3 2 5\n \"\"\"\n log.debug(\"|{0}| >> ball ...\".format(_str_func)) \n \n mPlug_ballToeLiftRollResult = cgmMeta.cgmAttr(mControl,'result_range_ballToeLiftRoll',\n attrType='float',keyable = False,hidden=True)\n mPlug_toeStraightRollResult = cgmMeta.cgmAttr(mControl,'result_range_toeStraightRoll',\n attrType='float',keyable = False,hidden=True)\n mPlug_oneMinusToeResultResult = cgmMeta.cgmAttr(mControl,\n 'result_pma_one_minus_toeStraitRollRange',\n attrType='float',keyable = False,hidden=True)\n mPlug_ball_x_toeResult = cgmMeta.cgmAttr(mControl,'result_md_roll_x_toeResult',\n attrType='float',keyable = False,hidden=True)\n mPlug_all_x_rollResult = cgmMeta.cgmAttr(mControl,'result_md_all_x_rollResult',\n attrType='float',keyable = False,hidden=True)\n \n arg1 = \"%s = setRange(0,1,0,%s,%s)\"%(mPlug_ballToeLiftRollResult.p_combinedShortName,\n mPlug_toeLift.p_combinedShortName,\n mPlug_roll.p_combinedShortName)\n arg2 = \"%s = setRange(0,1,%s,%s,%s)\"%(mPlug_toeStraightRollResult.p_combinedShortName,\n mPlug_toeLift.p_combinedShortName,\n mPlug_toeStaighten.p_combinedShortName,\n mPlug_roll.p_combinedShortName)\n arg3 = \"%s = 1 - %s\"%(mPlug_oneMinusToeResultResult.p_combinedShortName,\n mPlug_toeStraightRollResult.p_combinedShortName)\n \n arg4 = \"%s = %s * %s\"%(mPlug_ball_x_toeResult.p_combinedShortName,\n mPlug_oneMinusToeResultResult.p_combinedShortName,\n mPlug_ballToeLiftRollResult.p_combinedShortName)\n \n arg5 = \"%s = %s * %s\"%(mPlug_all_x_rollResult.p_combinedShortName,\n mPlug_ball_x_toeResult.p_combinedShortName,\n mPlug_roll.p_combinedShortName)\n \n for arg in [arg1,arg2,arg3,arg4,arg5]:\n NODEFACTORY.argsToNodes(arg).doBuild()\n \n #>>>Josh - resolve getting this back in and where it need to be in heirarchy\n if mBallJoint:\n mPlug_all_x_rollResult.doConnectOut(\"%s.r%s\"%(mBallJoint.mNode,jointOrientation[2]))\n \n \n #Toe setup -----------------------------------------------------------------------------------------------\n \"\"\"\n Schleifer's\n toe_loc.rotateX = linstep($toeLift, $toeStraight,$roll) * $roll;\n setRange md\n 1 2\n \"\"\"\n log.debug(\"|{0}| >> Toe ...\".format(_str_func)) \n \n mPlug_toeRangeResult = cgmMeta.cgmAttr(mControl,'result_range_toeLiftStraightRoll',attrType='float',keyable = False,hidden=True)\n mPlug_toe_x_rollResult = cgmMeta.cgmAttr(mControl,'result_md_toeRange_x_roll',attrType='float',keyable = False,hidden=True)\n \n arg1 = \"%s = setRange(0,1,%s,%s,%s)\"%(mPlug_toeRangeResult.p_combinedShortName,\n mPlug_toeLift.p_combinedShortName,\n mPlug_toeStaighten.p_combinedShortName, \n mPlug_roll.p_combinedShortName)\n arg2 = \"%s = %s * %s\"%(mPlug_toe_x_rollResult.p_combinedShortName,\n mPlug_toeRangeResult.p_combinedShortName,\n mPlug_roll.p_combinedShortName)\n for arg in [arg1,arg2]:\n NODEFACTORY.argsToNodes(arg).doBuild() \n \n mPlug_toe_x_rollResult.doConnectOut(\"%s.rx\"%(mFrontToe.mNode))\n \n #mPlug_toeRollResult.doConnectOut(\"%s.rx\"%(mToe.mNode))\n #mPlug_heelRollResult.doConnectOut(\"%s.rx\"%(mHeel.mNode)) \n else:\n log.debug(\"|{0}| >> StandardRoll ...\".format(_str_func))\n \n #Roll setup -----------------------------------------------------------------------------------------------\n \"\"\"\n Schleifer's\n outside_loc.rotateZ = min($side,0);\n clamp1\n inside_loc.rotateZ = max(0,$side);\n clamp2\n \"\"\" \n log.debug(\"|{0}| >> Bank ...\".format(_str_func)) \n 
mToe = d_drivenGroups['front']\n mHeel = d_drivenGroups['back']\n \n mPlug_toeRollResult = cgmMeta.cgmAttr(mControl,'result_clamp_toeRoll',attrType='float',keyable = False,hidden=True)\n mPlug_heelRollResult = cgmMeta.cgmAttr(mControl,'result_clamp_heelRoll',attrType='float',keyable = False,hidden=True)\n \n arg1 = \"%s = clamp(-360,0,%s)\"%(mPlug_heelRollResult.p_combinedShortName, \n mPlug_roll.p_combinedShortName)\n arg2 = \"%s = clamp(0,360,%s)\"%(mPlug_toeRollResult.p_combinedShortName,\n mPlug_roll.p_combinedShortName)\n for arg in [arg1,arg2]:\n NODEFACTORY.argsToNodes(arg).doBuild() \n \n mPlug_toeRollResult.doConnectOut(\"%s.rx\"%(mToe.mNode))\n mPlug_heelRollResult.doConnectOut(\"%s.rx\"%(mHeel.mNode))\n \n if b_centerOK: \n mPlug_bankBall = cgmMeta.cgmAttr(mControl,'centerBank',attrType='float',defaultValue = 0,keyable = True)\n mPlug_rollBall = cgmMeta.cgmAttr(mControl,'centerRoll',attrType='float',defaultValue = 0,keyable = True) \n \n #Ball roll ....\n mDriven = d_drivenGroups['center']\n mPlug_rollBall.doConnectOut(\"{0}.rx\".format(mDriven.mNode)) \n \"\"\"\n if _side in ['right']:\n str_arg = \"{0}.rx = -{1}\".format(mDriven.mNode,\n mPlug_rollBall.p_combinedShortName)\n log.debug(\"|{0}| >> Right arg: {1}\".format(_str_func,str_arg)) \n NODEFACTORY.argsToNodes(str_arg).doBuild()\n else:\n mPlug_rollBall.doConnectOut(\"{0}.rx\".format(mDriven.mNode)) \"\"\"\n \n \n #Spins ===================================================================================================\n \n log.debug(\"|{0}| >> Spin ...\".format(_str_func))\n if setupSpin:\n d_mPlugSpin = {}\n for k in d_drivenGroups.keys():\n d_mPlugSpin[k] = cgmMeta.cgmAttr(mControl,'spin{0}'.format(d_strCaps[k]),attrType='float',defaultValue = 0,keyable = True)\n \n for k in d_drivenGroups.keys():\n str_key = d_strCaps[k]\n mPlug = d_mPlugSpin[k]\n mDriven = d_drivenGroups[k]\n log.debug(\"|{0}| >> Spin {1} setup\".format(_str_func,str_key)) \n \n if _side in ['right']:# and k not in ['inner','outer']:\n str_arg = \"{0}.ry = -{1}\".format(mDriven.mNode,\n mPlug.p_combinedShortName)\n log.debug(\"|{0}| >> Spin Right arg: {1}\".format(_str_func,str_arg)) \n NODEFACTORY.argsToNodes(str_arg).doBuild()\n else:\n mPlug.doConnectOut(\"{0}.ry\".format(mDriven.mNode)) \n \n \n if b_bankOK:#Bank ===================================================================================================\n log.debug(\"|{0}| >> Bank ...\".format(_str_func))\n mPlug_bank = cgmMeta.cgmAttr(mControl,'bank',attrType='float',defaultValue = 0,keyable = True)\n \n mPlug_outerResult = cgmMeta.cgmAttr(mControl,'result_clamp_outerBank',attrType='float',keyable = False,hidden=True)\n mPlug_innerResult = cgmMeta.cgmAttr(mControl,'result_clamp_innerBank',attrType='float',keyable = False,hidden=True)\n \n if _side in ['right']:\n log.debug(\"|{0}| >> Bank right...\".format(_str_func)) \n mDrivenOutr = d_drivenGroups['left']\n mDrivenInner =d_drivenGroups['right']\n \n arg1 = \"%s = clamp(-360,0,%s)\"%(mPlug_innerResult.p_combinedShortName, \n mPlug_bank.p_combinedShortName)\n arg2 = \"%s = clamp(0,360,%s)\"%(mPlug_outerResult.p_combinedShortName,\n mPlug_bank.p_combinedShortName)\n for arg in [arg1,arg2]:\n NODEFACTORY.argsToNodes(arg).doBuild() \n \n str_bankDriverOutr = \"%s.rz = -%s\"%(mDrivenInner.mNode,\n mPlug_outerResult.p_combinedShortName)\n str_bankDriverInnr = \"%s.rz = -%s\"%(mDrivenOutr.mNode,\n mPlug_innerResult.p_combinedShortName) \n for arg in [str_bankDriverInnr,str_bankDriverOutr]:\n NODEFACTORY.argsToNodes(arg).doBuild()\n else:\n 
log.debug(\"|{0}| >> Bank normal...\".format(_str_func)) \n mDrivenOutr = d_drivenGroups['left']\n mDrivenInner =d_drivenGroups['right']\n \n arg1 = \"%s = clamp(-360,0,%s)\"%(mPlug_outerResult.p_combinedShortName, \n mPlug_bank.p_combinedShortName)\n arg2 = \"%s = clamp(0,360,%s)\"%(mPlug_innerResult.p_combinedShortName,\n mPlug_bank.p_combinedShortName)\n for arg in [arg1,arg2]:\n NODEFACTORY.argsToNodes(arg).doBuild() \n \n mPlug_outerResult.doConnectOut(\"%s.rz\"%(mDrivenOutr.mNode))\n mPlug_innerResult.doConnectOut(\"%s.rz\"%(mDrivenInner.mNode))\n \n \n if b_centerOK:#Ball bank ....\n log.debug(\"|{0}| >> Bank Center...\".format(_str_func)) \n mDriven = d_drivenGroups['center']\n if _side in ['right']:\n str_arg = \"{0}.rz = -{1}\".format(mDriven.mNode,\n mPlug_bankBall.p_combinedShortName)\n log.debug(\"|{0}| >> Right arg: {1}\".format(_str_func,str_arg)) \n NODEFACTORY.argsToNodes(str_arg).doBuild()\n else:\n mPlug_bankBall.doConnectOut(\"{0}.rz\".format(mDriven.mNode)) \n \n if setupWobble:\n mDriven = d_drivenGroups['tilt']\n mSpin = d_pivots['spin']\n mDriven.p_position = d_pivots['center'].p_position\n d_pivots['center'].masterGroup.p_position = mDriven.p_position\n \n arg1 = \"{}.ry = {}.ry * -1.0\".format(mDriven.p_nameShort,\n mSpin.p_nameShort) \n for arg in [arg1]:\n NODEFACTORY.argsToNodes(arg).doBuild()\n \n mSpin.dagLock(ignore='ry')\n \n #Inner spin --------------------------------------------------------\n mInnerSpinGroup = mDriven.doGroup(False, False, asMeta=True)\n mInnerSpinGroup.addAttr('cgmType','innerSpin')\n \n mInnerSpinGroup.doName()\n mInnerSpinGroup.parent = mDriven\n \n mLastParent = mInnerSpinGroup \n \n mPlug = cgmMeta.cgmAttr(d_pivots['tilt'],'innerSpin',attrType='float',defaultValue = 0,keyable = True)\n \n\n if _side in ['right']:# and k not in ['inner','outer']:\n str_arg = \"{0}.ry = -{1}\".format(mInnerSpinGroup.mNode,\n mPlug.p_combinedShortName)\n log.debug(\"|{0}| >> Spin Right arg: {1}\".format(_str_func,str_arg)) \n NODEFACTORY.argsToNodes(str_arg).doBuild()\n else:\n mPlug.doConnectOut(\"{0}.ry\".format(mInnerSpinGroup.mNode)) \n \n \n \n if mPivotResult:#Do this at the very end...\n mPivotResult.parent = mLastParent", "def match_nodes(source_node, target_node):\n\n node_position = cmds.xform(source_node, q=True, ws=True, t=True)\n node_rotation = cmds.xform(source_node, q=True, ws=True, ro=True)\n cmds.xform(target_node, ws=True, t=node_position)\n cmds.xform(target_node, ws=True, ro=node_rotation)", "def make_crosswalks(street_network, sidewalk_network):\n\n intersection_nodes = street_network.nodes.get_intersection_nodes()\n # intersection_nodes = [street_network.nodes.get(nid) for nid in intersection_node_ids]\n\n # Create sidewalk nodes for each intersection node and overwrite the adjacency information\n for intersection_node in intersection_nodes:\n try:\n adj_street_nodes = street_network.get_adjacent_nodes(intersection_node)\n adj_street_nodes = sort_nodes(intersection_node, adj_street_nodes)\n v_curr = intersection_node.vector()\n\n if len(adj_street_nodes) == 3:\n # Take care of the case where len(adj_nodes) == 3.\n # Identify the largest angle that are formed by three segments\n # Make a dummy node between two vectors that form the largest angle\n # Using the four nodes (3 original nodes and a dummy node), create crosswalk nodes\n vectors = [intersection_node.vector_to(adj_street_node, normalize=True) for adj_street_node in adj_street_nodes]\n angles = [math.acos(np.dot(vectors[i - 1], vectors[i])) for i in range(3)]\n idx = 
np.argmax(angles)\n vec_idx = (idx + 1) % 3\n dummy_vector = - vectors[vec_idx] * distance_to_sidewalk\n inverse_vec = - vectors[vec_idx]\n # dummy_vector = inverse_vec * latlng_offset_size(vectors[vec_idx][1], vectors[vec_idx][0],\n # vector=inverse_vec,\n # distance=distance_to_sidewalk)\n dummy_coordinate_vector = v_curr + dummy_vector\n dummy_node = Node(None, dummy_coordinate_vector[0], dummy_coordinate_vector[1])\n adj_street_nodes.insert(idx, dummy_node)\n\n # Create crosswalk nodes and add a cross walk to the data structure\n try:\n crosswalk_nodes = make_crosswalk_nodes(intersection_node, adj_street_nodes)\n except ValueError:\n raise\n\n crosswalk_node_ids = [node.id for node in crosswalk_nodes]\n crosswalk_node_ids.append(crosswalk_node_ids[0])\n # crosswalk = Sidewalk(None, crosswalk_node_ids, \"crosswalk\")\n\n # Add nodes to the network\n for crosswalk_node in crosswalk_nodes:\n sidewalk_network.add_node(crosswalk_node)\n sidewalk_network.nodes.crosswalk_node_ids.append(crosswalk_node.id)\n\n # Add crosswalks to the network\n crosswalk_node_id_pairs = window(crosswalk_node_ids, 2)\n for node_id_pair in crosswalk_node_id_pairs:\n n1 = sidewalk_network.nodes.get(node_id_pair[0])\n n2 = sidewalk_network.nodes.get(node_id_pair[1])\n if len(n1.get_way_ids()) == 1 and len(n2.get_way_ids()) == 1:\n crosswalk = Sidewalk(None, list(node_id_pair), \"footway\")\n else:\n crosswalk = Sidewalk(None, list(node_id_pair), \"crosswalk\")\n sidewalk_network.add_way(crosswalk)\n\n # Connect the crosswalk nodes with correct sidewalk nodes\n connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids)\n except ValueError:\n log.exception(\"ValueError in make_sidewalks, so skipping...\")\n continue\n return", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def create_child_nodes(current_node: Node, goal: list, generated: set) -> list:\n\n children = []\n locations = state_to_locations(current_node.state)\n blank = locations[0]\n\n # Moving blank to the left\n if blank[1] != 0:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0], new_locations[0][1] - 1)\n # Modifies the location of the blank in the new list\n \"\"\" Note that the index 0 represents the first column. 
So long as \n the blank is not in the first column, it can be moved to the left.\"\"\"\n neighbor = current_node.state[blank[0]][blank[1] - 1]\n # Finds the number on the tile to the left of the blank\n new_locations[neighbor] = (new_locations[neighbor][0], new_locations[neighbor][1] + 1)\n # Modifies the location of the neighbor in the new list\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('L')\n new_state = locations_to_state(new_locations)\n # Constructs the new state by calling locations_to_state\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n \"\"\" Append the child node to the list only if it's not a \n repeated state.\"\"\"\n\n # Moving blank to the right\n if blank[1] != 3:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0], new_locations[0][1] + 1)\n \"\"\" Similar to the case above: so long as the blank is not in the fourth \n column, it can be moved to the right.\"\"\"\n neighbor = current_node.state[blank[0]][blank[1] + 1]\n # Finds the number on the tile to the right of the blank\n new_locations[neighbor] = (new_locations[neighbor][0], new_locations[neighbor][1] - 1)\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('R')\n new_state = locations_to_state(new_locations)\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n\n # Moving blank up\n if blank[0] != 0:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0] - 1, new_locations[0][1])\n \"\"\" So long as the blank is not in the first row, it can be moved up.\"\"\"\n neighbor = current_node.state[blank[0] - 1][blank[1]]\n # Finds the number on the tile above the blank\n new_locations[neighbor] = (new_locations[neighbor][0] + 1, new_locations[neighbor][1])\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('U')\n new_state = locations_to_state(new_locations)\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n\n # Moving the blank down\n if blank[0] != 3:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0] + 1, new_locations[0][1])\n \"\"\" So long as the blank is not in the fourth row, it can be moved down.\"\"\"\n neighbor = current_node.state[blank[0] + 1][blank[1]]\n # Finds the number on the tile below the blank\n new_locations[neighbor] = (new_locations[neighbor][0] - 1, new_locations[neighbor][1])\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('D')\n new_state = locations_to_state(new_locations)\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n\n return children", "def project(B,nodes,create_using=None):\n\n if create_using==None:\n create_using=networkx.Graph()\n\n G=networkx.empty_graph(0,create_using)\n\n for v in nodes:\n G.add_node(v)\n for nbr in B[v]:\n G.add_edges_from([(v,u) for u in B[nbr] if u!=v])\n return G", "def create_nodes(coords):\n nodes = []\n for coord in coords:\n nodes.append(Node(coord))\n\n for i, node1 in 
enumerate(nodes):\n node0 = nodes[i - 1]\n if i == len(nodes) - 1:\n node2 = nodes[0]\n else:\n node2 = nodes[i + 1]\n node1.connect(node0, node2)\n return nodes", "def crossover(parent1, parent2):\n path1 = random_subtree(parent1, \"\")\n path2 = random_subtree(parent2, \"\")\n parent1 = parent1.copy()\n parent2 = parent2.copy()\n loc1 = parent1\n loc1parent = parent1\n loc2 = parent2\n loc2parent = parent2\n for i in range(len(path1)):\n loc1parent = loc1\n if path1[i] == \"1\":\n loc1 = loc1parent.get_right()\n else:\n loc1 = loc1parent.get_left()\n for i in range(len(path2)):\n loc2parent = loc2\n if path2[i] == \"1\":\n loc2 = loc2parent.get_right()\n else:\n loc2 = loc2parent.get_left()\n if(len(path1)-1 >= 0):\n if path1[len(path1)-1] == \"1\":\n loc1parent.right = loc2\n else:\n loc1parent.left = loc2\n if(len(path2) - 1 >= 0):\n if path2[-1] == \"1\":\n loc2parent.right = loc1\n else:\n loc2parent.left = loc1\n return (parent1, parent2)", "def single_crossover(self, original1, original2):\n point=self.r.uniform(0.1,0.6)\n cut1=int(point*len(original1))\n cut2=int(point*len(original2))\n child1=original1[:cut1]+original2[cut2:]\n child2=original2[:cut2]+original1[cut1:]\n return child1, child2", "def RandomlySetCrossLinks(network, node_list1, node_list2,\n cross_link_density=None,\n number_cross_links=None):\n # store node lists as arrays\n nodes1 = np.array(node_list1, dtype=NODE)\n nodes2 = np.array(node_list2, dtype=NODE)\n # retrieve number of nodes\n N1, N2 = len(nodes1), len(nodes2)\n # retrieve cross adjacency matrix\n cross_A = network.cross_adjacency(nodes1, nodes2).astype(ADJ)\n\n # determine number of cross links\n if cross_link_density is not None:\n number_cross_links = int(cross_link_density * (N1 * N2))\n print(\"Setting number of cross links according to \"\n \"chosen link density.\")\n elif cross_link_density is None and number_cross_links is None:\n number_cross_links = int(cross_A.sum())\n print(\"Creating a null model for the given interacting networks.\")\n # else: take the explicitly chosen number of cross links\n if number_cross_links > (N1 * N2):\n print(\"The number of cross links exceeds maximum.\")\n print(\"Setting link density of initial interacting network.\")\n number_cross_links = int(cross_A.sum())\n\n # retrieve adjacency matrix of the full interacting network\n A_new = network.adjacency.astype(ADJ)\n # create new empty cross adjacency matrix\n cross_A_new = np.zeros((N1, N2), dtype=ADJ)\n\n _randomlySetCrossLinks(A_new, cross_A_new, number_cross_links,\n nodes1, nodes2, N1, N2)\n return InteractingNetworks(adjacency=A_new,\n directed=network.directed,\n node_weights=network.node_weights,\n silence_level=network.silence_level)", "def test_insert_node_multiple_structure_3():\n chain = N.Node(1, N.Node(3))\n node = N.Node(4)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert at end)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at end)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at end)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert at end)\"", "def create_nodes(self, topogramId, nodes):\n assert type(nodes) is list\n return self.make_request(\"POST\", \"nodes\", { \"topogramId\" : topogramId, \"nodes\" : nodes})", "def Quick3(items, lo, hi):\r\n 
if hi <= lo:\r\n return\r\n lt = lo\r\n gt = hi\r\n pivot = items[(hi+lo)//2]\r\n i = lo\r\n while i <= gt:\r\n if items[i] < pivot:\r\n items[lt], items[i] = items[i], items[lt]\r\n lt += 1\r\n i += 1\r\n elif items[i] > pivot:\r\n items[gt], items[i] = items[i], items[gt]\r\n gt -= 1\r\n else:\r\n i += 1\r\n print(items)\r\n Quick3(items, lo, lt - 1)\r\n Quick3(items, gt + 1, hi)", "def __create_connections(self):\n \"\"\"\n When adding diagonals, each node adds only diagonals to nodes below it.\n This prevents a case where two nodes add diagonals with each other, s.t. both diagonals are added.\n \"\"\"\n # top left corner:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((0, 1)).left)\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 0)).up)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((1, 1)).up)\n else:\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 1)).left)\n # top row:\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((0, wi + 1)).left)\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((0, wi - 1)).right)\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi + 1)).left)\n # top right corner:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((0, -2)).right)\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -1)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((1, -2)).up)\n else:\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -2)).right)\n # middle rows:\n for hi in range(1, self.height - 1):\n # left node\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi, 1)).left)\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 0)).up)\n self.add_connection(self.get_junc((hi, 0)).up, self.get_junc((hi - 1, 0)).down)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi + 1, 1)).up)\n else:\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 1)).left)\n # middle nodes\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi, wi + 1)).left)\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi, wi - 1)).right)\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi)).up)\n self.add_connection(self.get_junc((hi, wi)).up, self.get_junc((hi - 1, wi)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi + 1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi + 1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 
1, wi + 1)).left)\n # right node:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi, -2)).right)\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -1)).up)\n self.add_connection(self.get_junc((hi, -1)).up, self.get_junc((hi - 1, -1)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi + 1, -2)).up)\n else:\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -2)).right)\n # bottom left corner:\n self.add_connection(self.get_junc((-1, 0)).right, self.get_junc((-1, 1)).left)\n self.add_connection(self.get_junc((-1, 0)).up, self.get_junc((-2, 0)).down)\n # bottom row\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((-1, wi)).right, self.get_junc((-1, wi + 1)).left)\n self.add_connection(self.get_junc((-1, wi)).left, self.get_junc((-1, wi - 1)).right)\n self.add_connection(self.get_junc((-1, wi)).up, self.get_junc((-2, wi)).down)\n # bottom right corner:\n self.add_connection(self.get_junc((-1, -1)).left, self.get_junc((-1, -2)).right)\n self.add_connection(self.get_junc((-1, -1)).up, self.get_junc((-2, -1)).down)", "def dynamic_crossover(nn1, nn2):\n # Lists for respective weights\n nn1_weights = get_weights(nn1.layers)\n nn2_weights = get_weights(nn2.layers)\n child_weights = []\n\n # Iterate through all weights from all layers for crossover\n for index, _ in enumerate(nn1_weights):\n # Get single point to split the matrix in parents based on # of cols\n coulmns = np.shape(nn1_weights[index])[1]-1\n split = random.randint(0, coulmns)\n # Iterate through after a single point and set the remaing cols to nn_2\n for j in range(split, coulmns):\n nn1_weights[index][:, j] = nn2_weights[index][:, j]\n\n # After crossover add weights to child\n child_weights.append(nn1_weights[index])\n\n # Add a chance for mutation\n mutation(child_weights)\n\n # Create and return child object\n return NeuralNetwork(child_weights)", "def create_pivot(remote, frame):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_CreatePivot( frame.get_frame3f() )\n remote.runCommand(cmd)\n new_objs_vec = mmapi.vectori()\n cmd.GetSceneCommandResult_AppendMeshFile(cmd_key, new_objs_vec)\n return new_objs_vec[0]", "def double_crossover(self, original1, original2):\n point1=self.r.uniform(0.1,0.3)\n point2=self.r.uniform(0.6,0.8)\n len1=len(original1)\n len2=len(original2)\n cut11=int(point1*len1)\n cut12=int(point2*len1)\n cut21=int(point1*len2)\n cut22=int(point2*len2)\n child1=original1[:cut11]+original2[cut21:cut22]+original1[cut12:]\n child2=original2[:cut21]+original1[cut11:cut12]+original2[cut22:]\n return child1, child2", "def test_insert_node_multiple_content_3():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(second))\n node = N.Node(third)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at end)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"", "def make_link(Graph, node1, node2):\n if node1 not in Graph:\n Graph[node1] = {}\n (Graph[node1])[node2] = 1\n if node2 not in Graph:\n Graph[node2] = {}\n (Graph[node2])[node1] = 1\n return Graph", "def link_pivot(remote, pivot_id, 
obj_id):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_LinkPivot(pivot_id, obj_id)\n remote.runCommand(cmd)\n ok = cmd.GetSceneCommandResult_IsOK(cmd_key)\n return (ok != 0)" ]
[ "0.5853841", "0.559815", "0.5525337", "0.5493262", "0.5337593", "0.53229654", "0.5302764", "0.52075243", "0.5164342", "0.5134576", "0.5120714", "0.5114995", "0.5095743", "0.5006646", "0.4994069", "0.49922845", "0.4960581", "0.49248648", "0.4921853", "0.4896195", "0.48945636", "0.48826617", "0.48752642", "0.4874647", "0.48620048", "0.48480174", "0.48408926", "0.48313743", "0.4831067", "0.48286062" ]
0.64911985
0
Connect crosswalk nodes to sidewalk nodes. Then remove redundant sidewalk nodes around the intersection.
def connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids): # crosswalk_node_ids = crosswalk.get_node_ids()[:-1] # Crosswalk has a redundant node at the end. for crosswalk_node_id in crosswalk_node_ids[:-1]: try: # Get the intersection node and two nodes that created the intersection sidewalk node crosswalk_node = sidewalk_network.nodes.get(crosswalk_node_id) intersection_node, adjacent_street_node1, adjacent_street_node2 = crosswalk_node.parents # Connect sidewalk nodes created from adjacent_street_node1 and adjacent_street_node2 # Get sidewalk nodes that are created from the street node, and # identify which one should be connected to crosswalk_node for adjacent_street_node in [adjacent_street_node1, adjacent_street_node2]: # Skip the dummy node if len(adjacent_street_node.get_way_ids()) == 0: continue # Create a vector from the intersection node to the adjacent street node. # Then also create a vector from the intersection node to the sidewalk node v_adjacent_street_node = intersection_node.vector_to(adjacent_street_node, normalize=True) shared_street_id = intersection_node.get_shared_way_ids(adjacent_street_node)[0] try: sidewalk_node_1_from_intersection, sidewalk_node_2_from_intersection = intersection_node.get_sidewalk_nodes(shared_street_id) except TypeError: # Todo: Issue #29. Sometimes shared_street_id does not exist in the intersection_node. log.exception("connect_crosswalk_nodes(): shared_street_id %s does not exist." % shared_street_id) continue v_sidewalk_node_1_from_intersection = intersection_node.vector_to(sidewalk_node_1_from_intersection, normalize=True) # Check which one of sidewalk_node_1_from_intersection and sidewalk_node_2_from_intersection are # on the same side of the road with crosswalk_node. # If the sign of the cross product from v_adjacent_street_node to v_crosswalk_node is same as # that of v_adjacent_street_node to v_sidewalk_node_1_from_intersection, then # sidewalk_node_1_from_intersection should be on the same side. # Otherwise, sidewalk_node_2_from_intersection should be on the same side with crosswalk_node. v_crosswalk_node = intersection_node.vector_to(crosswalk_node, normalize=True) if np.cross(v_adjacent_street_node, v_crosswalk_node) * np.cross(v_adjacent_street_node, v_sidewalk_node_1_from_intersection) > 0: node_to_swap = sidewalk_node_1_from_intersection else: node_to_swap = sidewalk_node_2_from_intersection sidewalk_network.swap_nodes(node_to_swap, crosswalk_node) except ValueError: log.exception("Error while connecting crosswalk nodes, so skipping...") continue return
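The document above decides which sidewalk node to swap with the crosswalk node by comparing the signs of two 2-D cross products. As an editor's illustrative sketch only (not part of the dataset; plain NumPy with made-up vectors rather than the corpus's Node class), the side test it performs reduces to:

import numpy as np

def same_side(v_street, v_crosswalk, v_candidate):
    # In 2-D, np.cross returns a scalar whose sign says whether the second
    # vector lies counter-clockwise (+) or clockwise (-) of the first.
    # The candidate sidewalk node is on the crosswalk node's side of the
    # street exactly when both cross products share the same sign.
    return np.cross(v_street, v_crosswalk) * np.cross(v_street, v_candidate) > 0

# Hypothetical vectors from an intersection node (illustrative values only):
v_street = np.array([1.0, 0.0])        # toward the adjacent street node
v_crosswalk = np.array([0.5, 0.5])     # toward the crosswalk node (left of street)
v_sidewalk_1 = np.array([0.3, 0.8])    # sidewalk candidate on the left
v_sidewalk_2 = np.array([0.3, -0.8])   # sidewalk candidate on the right

print(same_side(v_street, v_crosswalk, v_sidewalk_1))   # True  -> this node gets swapped
print(same_side(v_street, v_crosswalk, v_sidewalk_2))   # False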
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_crosswalks(street_network, sidewalk_network):\n\n intersection_nodes = street_network.nodes.get_intersection_nodes()\n # intersection_nodes = [street_network.nodes.get(nid) for nid in intersection_node_ids]\n\n # Create sidewalk nodes for each intersection node and overwrite the adjacency information\n for intersection_node in intersection_nodes:\n try:\n adj_street_nodes = street_network.get_adjacent_nodes(intersection_node)\n adj_street_nodes = sort_nodes(intersection_node, adj_street_nodes)\n v_curr = intersection_node.vector()\n\n if len(adj_street_nodes) == 3:\n # Take care of the case where len(adj_nodes) == 3.\n # Identify the largest angle that are formed by three segments\n # Make a dummy node between two vectors that form the largest angle\n # Using the four nodes (3 original nodes and a dummy node), create crosswalk nodes\n vectors = [intersection_node.vector_to(adj_street_node, normalize=True) for adj_street_node in adj_street_nodes]\n angles = [math.acos(np.dot(vectors[i - 1], vectors[i])) for i in range(3)]\n idx = np.argmax(angles)\n vec_idx = (idx + 1) % 3\n dummy_vector = - vectors[vec_idx] * distance_to_sidewalk\n inverse_vec = - vectors[vec_idx]\n # dummy_vector = inverse_vec * latlng_offset_size(vectors[vec_idx][1], vectors[vec_idx][0],\n # vector=inverse_vec,\n # distance=distance_to_sidewalk)\n dummy_coordinate_vector = v_curr + dummy_vector\n dummy_node = Node(None, dummy_coordinate_vector[0], dummy_coordinate_vector[1])\n adj_street_nodes.insert(idx, dummy_node)\n\n # Create crosswalk nodes and add a cross walk to the data structure\n try:\n crosswalk_nodes = make_crosswalk_nodes(intersection_node, adj_street_nodes)\n except ValueError:\n raise\n\n crosswalk_node_ids = [node.id for node in crosswalk_nodes]\n crosswalk_node_ids.append(crosswalk_node_ids[0])\n # crosswalk = Sidewalk(None, crosswalk_node_ids, \"crosswalk\")\n\n # Add nodes to the network\n for crosswalk_node in crosswalk_nodes:\n sidewalk_network.add_node(crosswalk_node)\n sidewalk_network.nodes.crosswalk_node_ids.append(crosswalk_node.id)\n\n # Add crosswalks to the network\n crosswalk_node_id_pairs = window(crosswalk_node_ids, 2)\n for node_id_pair in crosswalk_node_id_pairs:\n n1 = sidewalk_network.nodes.get(node_id_pair[0])\n n2 = sidewalk_network.nodes.get(node_id_pair[1])\n if len(n1.get_way_ids()) == 1 and len(n2.get_way_ids()) == 1:\n crosswalk = Sidewalk(None, list(node_id_pair), \"footway\")\n else:\n crosswalk = Sidewalk(None, list(node_id_pair), \"crosswalk\")\n sidewalk_network.add_way(crosswalk)\n\n # Connect the crosswalk nodes with correct sidewalk nodes\n connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids)\n except ValueError:\n log.exception(\"ValueError in make_sidewalks, so skipping...\")\n continue\n return", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, 
same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def remove_nodes(self, nodes):\n for node in nodes:\n for arc in node.entries:\n arc.src.exits.remove(arc)\n self.arcs.remove(arc)\n for arc in node.exits:\n arc.dest.entries.remove(arc)\n self.arcs.remove(arc)\n self.nodes.remove(node)\n dangling_nodes = []\n for node in self.nodes:\n if node == self.start or node == self.end:\n pass\n else:\n if not node.exits or not node.entries:\n dangling_nodes.append(node)\n if dangling_nodes:\n self.remove_nodes(dangling_nodes)", "def _prune_unreached(self):\n swcdict = {}\n for n in self._data: # Hash all the swc nodes\n swcdict[n[0]] = Node(n[0])\n\n # Try to join all the unconnected branches at first\n for i, n in enumerate(self._data):\n if n[6] not in swcdict:\n # Try to match it\n matched, midx = self.match(n[2:5], n[5])\n if matched:\n self._data[i, 6] = self._data[midx, 0]\n\n # Add mutual links for all nodes\n for n in self._data:\n id = n[0]\n pid = n[6]\n if pid >= 0:\n swcdict[id].add_link(swcdict[pid])\n\n groups = connected_components(set(swcdict.values()))\n lenlist = [len(g) for g in groups]\n maxidx = lenlist.index(max(lenlist))\n set2keep = groups[maxidx]\n id2keep = [n.id for n in set2keep]\n self._data = self._data[np.in1d(self._data[:, 0], np.asarray(id2keep)), :]", "def remove_nodes_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_in in self.successors[nd.name]:\n self.predecessors[nd_in.name].remove(nd)\n self.edges.remove((nd, nd_in))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)", "def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)", "def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing 
elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()", "def simplify(self):\n \n added_clumps = []\n staying_tunnels = []\n removed_clumps = set()\n \n for tunnel in self.tunnels:\n tunnel_end_distance = self.get_distance(tunnel.start, tunnel.end)\n if tunnel_end_distance - tunnel.start.distance_from_wall < 0 or \\\n tunnel_end_distance - tunnel.end.distance_from_wall < 0:\n removed_clumps.add(tunnel.start.node)\n removed_clumps.add(tunnel.end.node)\n new_node = tunnel.merge_endpoints()\n added_clumps.append(new_node)\n else:\n staying_tunnels.append(tunnel)\n #print removed_clumps\n \n new_clumps = []\n \n for clump in list(self.clumps) + added_clumps:\n if clump not in removed_clumps:\n new_clumps.append(clump)\n else:\n removed_clumps.remove(clump)\n\n if removed_clumps:\n raise Exception(\"Some removed clumps couldn't be found in the main set and I'm scared\")\n \n self.clumps = new_clumps\n self.tunnels = staying_tunnels", "def make_crosswalk_nodes(intersection_node, adj_street_nodes):\n if len(adj_street_nodes) < 4:\n raise ValueError(\"You need to pass 4 or more nodes for adj_street_nodes \")\n\n crosswalk_nodes = []\n for i in range(len(adj_street_nodes)):\n n1 = adj_street_nodes[i - 1]\n n2 = adj_street_nodes[i]\n crosswalk_node = make_crosswalk_node(intersection_node, n1, n2)\n\n # Keep track of from which streets the crosswalk nodes are created.\n way_ids = []\n for wid in n1.get_way_ids():\n way_ids.append(wid)\n for wid in n2.get_way_ids():\n way_ids.append(wid)\n way_ids = intersection_node.get_shared_way_ids(way_ids)\n\n crosswalk_node.way_ids = way_ids\n crosswalk_nodes.append(crosswalk_node)\n crosswalk_node.parents = (intersection_node, n1, n2)\n\n return crosswalk_nodes", "def handle_diagonals_crossing_connections(self, conn: Connection):\n if not self.is_connection_diagonal(conn):\n return False\n j1 = self.get_junc_from_node(conn.me)\n j2 = self.get_junc_from_node(conn.other)\n # check if top-left to bottom-right diagonal of top-right to bottom-left diagonal\n indices_diff = (j1.indices.row - j2.indices.row, j1.indices.col - j2.indices.col)\n if indices_diff[0] == indices_diff[1]:\n # top-left to bottom-right\n top_left = j1 if indices_diff[0] == -1 else j2 # else diff is 1\n top_right = self.get_junc((top_left.indices.row, top_left.indices.col + 1))\n bottom_left = self.get_junc((top_left.indices.row + 1, top_left.indices.col))\n if self.are_juncs_connected(top_right, bottom_left):\n # print(conn, top_right, bottom_left, sep=\"\\n\")\n # we should remove the connection.\n if bottom_left.right.node_id in top_right.down.get_connections_ids():\n top_right.down.remove_connection_by_id(bottom_left.right.node_id)\n if bottom_left.up.node_id in top_right.left.get_connections_ids():\n top_right.left.remove_connection_by_id(bottom_left.up.node_id)\n else:\n # top-right to bottom-left\n top_right = j1 if indices_diff[0] == -1 else j2 # else diff is 1\n top_left = self.get_junc((top_right.indices.row, top_right.indices.col - 1))\n bottom_right = self.get_junc((top_right.indices.row + 1, top_right.indices.col))\n if self.are_juncs_connected(top_left, bottom_right):\n # print(conn, top_left, bottom_right, sep=\"\\n\")\n # we should remove the connection.\n if bottom_right.left.node_id in top_left.down.get_connections_ids():\n top_left.down.remove_connection_by_id(bottom_right.left.node_id)\n if bottom_right.up.node_id in top_left.right.get_connections_ids():\n top_left.right.remove_connection_by_id(bottom_right.up.node_id)", "def 
prune_to_nodes(self, nodes, merge_monotomies=True):\n to_remove = self.leaves - set(nodes) # This is sufficient to erode all unwanted internal nodes.\n for node in to_remove:\n self.remove_tree_node(node)\n parent = node.parent\n if parent in nodes:\n continue # Only happens if the user wants to keep an internal node.\n elif merge_monotomies and len(parent.children) == 1:\n sib = parent.children[0]\n if parent != self.root:\n # node.parent only has 1 child, so it's removed and node's sib is connected to node's grandparent.\n sib.branch += parent.branch\n par_index = parent.parent.children.index(parent)\n parent.parent.children[par_index] = sib\n sib.parent = parent.parent\n else:\n # self.root now has only 1 child, so it's replaced by that child.\n self.root = sib\n self.root.branch = 0\n self.remove_tree_node(parent, remove_from_parent=False)\n self.process_tree_nodes()", "def prune_unlinked(self):\n linked_ids = set()\n for (link_from, link_to, link_style, link_tail) in self.links:\n linked_ids.add(link_from)\n linked_ids.add(link_to)\n nodes_to_delete = []\n for name, node in self.nodes.items():\n if node.node_id not in linked_ids:\n nodes_to_delete.append(name)\n for name in nodes_to_delete:\n del self.nodes[name]", "def __dfs_roads_directions(self, with_prints=False) -> Set[JuncRoadSingleConnection]:\n\n roads: Set[JuncRoadSingleConnection] = set()\n visited_indices: Set[JuncIndices] = set()\n\n def dfs_rec(junc: JuncNode):\n \"\"\"\n recursively run from the input junction\n :param junc: the junction to run from\n \"\"\"\n # add to visited\n visited_indices.add(junc.indices)\n # run on neighbors\n for neighbor in self.get_connected_juncs(junc):\n # go over unvisited juncs and add roads to them from current\n if neighbor.indices not in visited_indices:\n if with_prints:\n print(junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n dfs_rec(neighbor)\n \"\"\"\n there is a case where we are currently at junc 1, which has neighbors 2,3.\n from junc 1 wwe move to 2, that moves to 3 from it.\n 3 will not go to 1 because 1 is visited, so the road 3->1 will not be created.\n when returning to 1, it will not go to 3, because 3 is visited, so the road 1->3 will not be created.\n so we result in a conncetion with no road, fix it:\n \"\"\"\n if neighbor.indices in visited_indices \\\n and JuncRoadSingleConnection(junc.indices, neighbor.indices) not in roads \\\n and JuncRoadSingleConnection(neighbor.indices, junc.indices) not in roads:\n if with_prints:\n print(\"special\", junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n # do not call dfs recursivly\n\n def first_node(junc: JuncNode):\n \"\"\"\n run specifically for a group-start junction\n :param junc: a junction that is the first in a connected group\n \"\"\"\n # add to visited\n visited_indices.add(junc.indices)\n #\n \"\"\"\n first, choose a random road to be in-road. this prevents a first node with only out-roads.\n a problem: if this is not the first first_node, it is possible that the random road will be \n an already existing road, in the otehr direction. 
so we need to make sure that the random road we choose\n is not already set in the other side.\n \"\"\"\n neighbors = self.get_connected_juncs(junc).copy()\n in_checked_indices: Set[JuncIndices] = set()\n in_road_junc = choice(neighbors)\n in_checked_indices.add(in_road_junc.indices)\n while JuncRoadSingleConnection(junc.indices, in_road_junc.indices) in roads and len(\n in_checked_indices) != len(neighbors):\n in_road_junc = choice(neighbors)\n in_checked_indices.add(in_road_junc.indices)\n if len(in_checked_indices) != len(neighbors):\n # regular stop, we have found a road to be in_road.\n directions = self.get_connection_directions(in_road_junc, junc)\n roads.add(JuncRoadSingleConnection(in_road_junc.indices, junc.indices, directions[0], directions[1]))\n if with_prints:\n print(\"first in-road\", in_road_junc.indices, junc.indices)\n # run for the rest of the neighbors\n neighbors.remove(in_road_junc)\n # else, this junc has only out-roads and this cannot be fixed.\n for neighbor in neighbors:\n if neighbor.indices not in visited_indices: # the other case is handled through the neighbor in dfs_rec\n if with_prints:\n print(\"first\", junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n dfs_rec(neighbor)\n\n all_juncs_indices: Set[JuncIndices] = {junc.indices for junc in self.get_all_juncs()}\n # the graph may not be connected, should run until all connected parts are visited\n while len(all_juncs_indices) != len(visited_indices):\n # now choose a junc and run on it.\n start_junc = self.get_junc(sample(all_juncs_indices.difference(visited_indices), 1)[0])\n first_node(start_junc)\n return roads", "def _pair_based_graph_cut(self, graph):\n for node in self._find_paired_nodes(graph):\n graph.remove_node(node)\n return", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = 
list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def multipleInBetweenLayerEdgesIntoNodeWithNoFixedPortOrderCauseCrossings(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer(graph)\n leftNodes = addNodesToLayer(2, leftLayer)\n rightLayer = makeLayer(graph)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n self.addInLayerEdge(rightNodes[0], rightNodes[2], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n return graph", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def prune_to(self, names, merge_monotomies=True):\n self.prune_to_nodes(self.get_nodes(names), merge_monotomies)", "def connect(self, other):\n other.walls.remove(other._wall_to(self))\n self.walls.remove(self._wall_to(other))", "def _unprune_referenced_sub_workflows(self, keep_paths, prune_paths):\n\n keep_nodes = frozenset([path[-1] for path in keep_paths])\n\n shift_path_indexes = frozenset(\n idx for (idx, path) in enumerate(prune_paths)\n if any(node in keep_nodes for node in path))\n\n if not shift_path_indexes:\n return (keep_paths, prune_paths)\n\n for idx in shift_path_indexes:\n node = prune_paths[idx][-1]\n logger.info(\n \"Keeping node %s.%s because it is downstream of an --only-nodes argument\",\n node[0],\n node[1])\n\n return self._unprune_referenced_sub_workflows(\n keep_paths + [prune_paths[i] for i in shift_path_indexes],\n [path for (i, path) in enumerate(prune_paths) if i not in shift_path_indexes])", "def remove_previous_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_out in self.predecessors[nd.name]:\n if nd_out.name in self.successors:\n self.successors[nd_out.name].remove(nd)\n self.edges.remove((nd_out, nd))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)", "def prune_delta(links, nodes, shoreline_shp, inlets_shp, gdobj,\r\n prune_less):\r\n # Get inlet nodes\r\n nodes = find_inlet_nodes(nodes, inlets_shp, gdobj)\r\n\r\n if prune_less is False:\r\n # Remove spurs from network (this includes valid inlets and outlets)\r\n links, nodes = lnu.remove_all_spurs(links, nodes,\r\n dontremove=list(nodes['inlets']))\r\n\r\n # Clip the network with a shoreline polyline, adding outlet nodes\r\n links, nodes = clip_by_shoreline(links, nodes, shoreline_shp, gdobj)\r\n\r\n # Remove spurs from network (this includes valid inlets and outlets)\r\n links, nodes = lnu.remove_all_spurs(links, nodes,\r\n dontremove=list(nodes['inlets']) +\r\n list(nodes['outlets']))\r\n\r\n # Remove sets of links that are disconnected from inlets/outlets except for\r\n # a single bridge link (effectively re-pruning the network)\r\n links, nodes = lnu.remove_disconnected_bridge_links(links, nodes)\r\n\r\n # # Add artificial nodes where necessary\r\n # 
links, nodes = lnu.add_artificial_nodes(links, nodes, gdobj)\r\n\r\n # Remove one-node links\r\n links, nodes = lnu.remove_single_pixel_links(links, nodes)\r\n\r\n # Find parallel links\r\n links, nodes = lnu.find_parallel_links(links, nodes)\r\n\r\n return links, nodes", "def __create_connections(self):\n \"\"\"\n When adding diagonals, each node adds only diagonals to nodes below it.\n This prevents a case where two nodes add diagonals with each other, s.t. both diagonals are added.\n \"\"\"\n # top left corner:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((0, 1)).left)\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 0)).up)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((1, 1)).up)\n else:\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 1)).left)\n # top row:\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((0, wi + 1)).left)\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((0, wi - 1)).right)\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi + 1)).left)\n # top right corner:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((0, -2)).right)\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -1)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((1, -2)).up)\n else:\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -2)).right)\n # middle rows:\n for hi in range(1, self.height - 1):\n # left node\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi, 1)).left)\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 0)).up)\n self.add_connection(self.get_junc((hi, 0)).up, self.get_junc((hi - 1, 0)).down)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi + 1, 1)).up)\n else:\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 1)).left)\n # middle nodes\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi, wi + 1)).left)\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi, wi - 1)).right)\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi)).up)\n self.add_connection(self.get_junc((hi, wi)).up, self.get_junc((hi - 1, wi)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi + 1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi + 1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi + 1)).left)\n # right node:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi, 
-2)).right)\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -1)).up)\n self.add_connection(self.get_junc((hi, -1)).up, self.get_junc((hi - 1, -1)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi + 1, -2)).up)\n else:\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -2)).right)\n # bottom left corner:\n self.add_connection(self.get_junc((-1, 0)).right, self.get_junc((-1, 1)).left)\n self.add_connection(self.get_junc((-1, 0)).up, self.get_junc((-2, 0)).down)\n # bottom row\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((-1, wi)).right, self.get_junc((-1, wi + 1)).left)\n self.add_connection(self.get_junc((-1, wi)).left, self.get_junc((-1, wi - 1)).right)\n self.add_connection(self.get_junc((-1, wi)).up, self.get_junc((-2, wi)).down)\n # bottom right corner:\n self.add_connection(self.get_junc((-1, -1)).left, self.get_junc((-1, -2)).right)\n self.add_connection(self.get_junc((-1, -1)).up, self.get_junc((-2, -1)).down)", "def clean_edges(self):", "def crossing_minimization(self):\n self.layer_sweep()", "def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)", "def optimize_left(connect):\n if intersect(connect[0], connect[1], connect[2]) == False:\n return connect\n else:\n connect = connect_left(connect[0], connect[2])\n if intersect(connect[0], connect[1], connect[2]) == True:\n return optimize_left(connect)\n else:\n return connect" ]
[ "0.6593879", "0.6568376", "0.57184863", "0.57171595", "0.5688481", "0.5669665", "0.5584961", "0.5583269", "0.55469465", "0.5519208", "0.5487563", "0.54811114", "0.5460484", "0.5458733", "0.5423852", "0.5416431", "0.53239524", "0.5261848", "0.5261536", "0.5253484", "0.5230734", "0.5198505", "0.51965165", "0.5193734", "0.51382315", "0.5102773", "0.50952035", "0.50682783", "0.50466824", "0.5045582" ]
0.72504723
0
Splits a large OSM file
def split_large_osm_file(filename): command = "java -Xmx4000M -jar ./lib/splitter.jar --output=xml --output-dir=data --max-nodes=15000 " + filename + " > splitter.log" os.system(command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual 
file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()", "def splitFileIntoShards(filename, shardsize):\n os.popen('split -a 4 -d --additional-suffix=_shard -l{} {}'.format(shardsize, filename))", "def split_train_into_chunks(chunk_size):\n for syscall_type in SYSCALLS:\n syscalls_split_file = open(f\"{TEMP_DIR}/{syscall_type}-split.train\", \"w\")\n snd_train_path = f\"{FILE_PATH}/{syscall_type}/{syscall_type}.train\"\n with open(snd_train_path) as train_file:\n for syscall in train_file:\n # Generate all n-grams of the current syscall\n n_grams = extract_n_grams(syscall.strip(),chunk_size,unique=True)\n if len(n_grams)==0:\n continue\n # Write n-grams to syscall chunks file\n syscalls_split_file.writelines(n_grams)\n syscalls_split_file.close()", "def split_file(self):\n title = \"row_id,x,y,accuracy,time,place_id\\n\"\n print \"splitting files into grid files...\"\n sub_folder = os.path.join(Setting.grid_path, str(self.xsplit)+\"_\"+str(self.ysplit))\n if not os.path.exists(sub_folder):\n os.mkdir(sub_folder)\n for m in range(self.xsplit):\n # to avoid open too many files (ysplit should less than 1000 here)\n print \"starting No.\", m, \" subprocess...\"\n train_writers = []\n for n in range(self.ysplit):\n xfolder = os.path.join(sub_folder, str(m))\n if not os.path.exists(xfolder):\n os.mkdir(xfolder)\n yfolder = os.path.join(xfolder, str(n))\n if not os.path.exists(yfolder):\n os.mkdir(yfolder)\n train_file = os.path.join(yfolder, \"train.csv\")\n train_writers.append(open(train_file, \"w\"))\n train_writers[-1].write(title)\n\n for record in read_record(self.train_path):\n place_id = record[-1]\n rec_str = \",\".join([str(x) for x in record])\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if place_id in self.grid_place[slot]:\n train_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in train_writers:\n writer.close()\n\n test_writers = []\n for n in range(self.ysplit):\n test_file = os.path.join(sub_folder, str(m), str(n), \"test.csv\")\n test_writers.append(open(test_file, \"w\"))\n test_writers[-1].write(title)\n\n for record in read_record(self.test_path):\n x_ind, y_ind = grid_cut(record[0], record[1], self.xsplit, self.ysplit)\n grid_slot = x_ind*self.ysplit + y_ind\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if grid_slot == slot:\n rec_str = \",\".join([str(x) for x in record])\n test_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in test_writers:\n writer.close()", "def splitFile(f, rootdir=\"/tmp\", splitCmd=\"/usr/bin/split\", chunkSize=\"100m\"):\n d = str(uuid.uuid4())\n path = os.path.join(rootdir, 
d)\n # I want it to fail hard here\n os.makedirs(path)\n prefix = os.path.join(path, \"chunk-\")\n subprocess.check_call([splitCmd, \"-b\", chunkSize, \"-d\", \"-a\", \"5\", f, prefix])\n chunks = glob.glob(os.path.join(path, \"chunk-*\"))\n chunks.sort()\n return chunks", "def load_chunk_data(self, split_filenames: List, split_feature_dir: str, gt_meta_dir: str,\n doa_format: str = 'xyz', split: str = 'train'):\n pointer = 0\n features_list = []\n filename_list = []\n sed_targets_list = []\n doa_targets_list = []\n idxes_list = []\n for filename in split_filenames:\n feature_fn = os.path.join(split_feature_dir, filename + '.h5')\n # Load feature\n with h5py.File(feature_fn, 'r') as hf:\n feature = hf['feature'][:]\n # Normalize feature\n feature = (feature - self.mean) / self.std\n n_frames = feature.shape[1]\n # Load gt info from metadata\n gt_meta_fn = os.path.join(gt_meta_dir, filename + '.csv')\n df = pd.read_csv(gt_meta_fn, header=None,\n names=['frame_number', 'sound_class_idx', 'track_number', 'azimuth', 'elevation'])\n frame_number = df['frame_number'].values\n sound_class_idx = df['sound_class_idx'].values\n track_number = df['track_number'].values\n azimuth = df['azimuth'].values\n elevation = df['elevation'].values\n # Generate target data\n sed_target = np.zeros((n_frames, self.n_classes), dtype=np.float32)\n azi_target = np.zeros((n_frames, self.n_classes), dtype=np.float32)\n ele_target = np.zeros((n_frames, self.n_classes), dtype=np.float32)\n nsources_target = np.zeros((n_frames, 3), dtype=np.float32)\n count_sources_target = np.zeros((n_frames,), dtype=np.float32)\n for itrack in np.arange(5):\n track_idx = track_number == itrack\n frame_number_1 = frame_number[track_idx]\n sound_class_idx_1 = sound_class_idx[track_idx]\n azimuth_1 = azimuth[track_idx]\n elevation_1 = elevation[track_idx]\n for idx, iframe in enumerate(frame_number_1):\n start_idx = int(iframe * self.label_upsample_ratio - self.label_upsample_ratio//2)\n start_idx = np.max((0, start_idx))\n end_idx = int(start_idx + self.label_upsample_ratio)\n end_idx = np.min((end_idx, n_frames))\n class_idx = int(sound_class_idx_1[idx])\n sed_target[start_idx:end_idx, class_idx] = 1.0\n azi_target[start_idx:end_idx, class_idx] = azimuth_1[idx] * np.pi / 180.0 # Radian unit\n ele_target[start_idx:end_idx, class_idx] = elevation_1[idx] * np.pi / 180.0 # Radian unit\n count_sources_target[start_idx:end_idx] += 1\n # Convert nsources to one-hot encoding\n for i in np.arange(3):\n idx = count_sources_target == i\n nsources_target[idx, i] = 1.0\n # Doa target\n if doa_format == 'polar':\n doa_target = np.concatenate((azi_target, ele_target), axis=-1)\n elif doa_format == 'xyz':\n x = np.cos(azi_target) * np.cos(ele_target)\n y = np.sin(azi_target) * np.cos(ele_target)\n z = np.sin(ele_target)\n doa_target = np.concatenate((x, y, z), axis=-1)\n # Get segment indices\n n_crop_frames = n_frames\n assert self.chunk_len <= n_crop_frames, 'Number of cropped frame is less than chunk len'\n idxes = np.arange(pointer, pointer + n_crop_frames - self.chunk_len + 1, self.chunk_hop_len).tolist()\n # Include the leftover of the cropped data\n if (n_crop_frames - self.chunk_len) % self.chunk_hop_len != 0:\n idxes.append(pointer + n_crop_frames - self.chunk_len)\n pointer += n_crop_frames\n # Append data\n features_list.append(feature)\n filename_list.extend([filename] * len(idxes))\n sed_targets_list.append(sed_target)\n doa_targets_list.append(doa_target)\n idxes_list.append(idxes)\n\n if len(features_list) > 0:\n features = 
np.concatenate(features_list, axis=1)\n sed_targets = np.concatenate(sed_targets_list, axis=0)\n doa_targets = np.concatenate(doa_targets_list, axis=0)\n chunk_idxes = np.concatenate(idxes_list, axis=0)\n test_batch_size = len(idxes) # to load all chunks of the same file\n return features, sed_targets, doa_targets, chunk_idxes, filename_list, test_batch_size\n else:\n return None, None, None, None, None", "def split_lod_by_item(lod, max_items=10000):\n max_items = min(max_items, SF_BULK_MAX_ITEM)\n files = []\n for i in range(0, len(lod), max_items):\n files.append(lod[i:i + max_items])\n return files", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()", "def split_input(self):\n namenode = self.runner.namenode\n splitter = Splitter(RECORDS_PER_BLOCK)\n results = []\n input_files = []\n for fname in self.inputs:\n input_files.append(RecordFile(fname, namenode))\n\n taskid = 0\n for block in splitter.split(input_files):\n fname = map_input(self.id, taskid)\n taskid += 1\n namenode.create_file(fname)\n\n bytes_written = 0\n for record in block:\n bytes_written += namenode.write_file(fname, bytes_written,\n record)\n\n namenode.close_file(fname)\n results.append(fname)\n self.open_files.append(fname)\n\n for file_ in input_files:\n file_.close()\n\n return results", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def split_file(filename, split_num):\n root, ext = os.path.splitext(filename)\n with open(filename) as f:\n lines = f.readlines()\n total_line = len(lines)\n\n print lines[0].split('\\t')\n\n size = total_line / split_num\n\n print 'Total line: %d, splited file line number: %d' % (total_line, size)\n\n total_line - size * split_num\n for i in range(0, split_num):\n split_file = root + '_' + str(i+1) + ext\n\n start = i * size;\n end = (i+1) * size;\n if i == split_num - 1:\n end = total_line\n\n print 'splite file %s: line from %d to %d' % (split_file, start, end)\n\n with open(split_file, 'w') as fw:\n for j in range(start, end):\n fw.write('%s' % lines[j])", "def split_single_file(self, filename):\n file_size = os.path.getsize(filename)\n chunk_size = (file_size + self.worker_num - 1) / self.worker_num\n file_handler = open(filename, \"r\")\n chunks = []\n pos = 0\n while pos < file_size:\n next_pos = min(pos + chunk_size, file_size)\n if pos == 0:\n chunks.append((filename, pos, self.find_next_newline(file_handler, next_pos)))\n else:\n chunks.append((filename, self.find_next_newline(file_handler, pos), self.find_next_newline(file_handler, next_pos)))\n pos = next_pos\n file_handler.close()\n return chunks", "def split(self):\n\n # FIXME: user should be able to change the default behavior of\n # this function (for instance user 
may require one filter not\n # to split the content of the input file and the same input \n # to be used by the next filter.\n \n utils.split_file(self.files['hit_ids'],\n self.files['input'],\n self.files['filtered_reads'],\n self.files['survived_reads'])", "def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \n download_server = environ.get('ROAD_OBSTACLE_URL')\n if download_server is None:\n raise RuntimeError('Please specify server URL as ROAD_OBSTACLE_URL env variable.')\n\n download_url = download_server + \"/dataset_RoadObstacle_0.0.3.zip\"\n download_dir = dl_manager.download_and_extract(download_url)\n\n data_dir = Path(download_dir) / 'dataset_RoadObstacle'\n\n splits = json.loads((data_dir / 'splits.json').read_text())\n\n make_split_entry = lambda name, key: SplitGenerator(\n name=name, \n gen_kwargs = dict(data_dir=str(data_dir), split=key)\n )\n\n return [\n make_split_entry(tfds.Split.TEST, 'full')\n ] + [\n make_split_entry(k, k)\n for k in sorted(splits.keys())\n ]", "def split_data_set(reddit_path, data_set_name, on, num_splits, target_directories, map_columns=None):\n targets = {}\n for i in range(num_splits):\n targets[i] = os.path.join(target_directories[i], data_set_name)\n mkdir(targets[i])\n\n full_sub_data_path = os.path.join(reddit_path, data_set_name)\n data_files = map(lambda f: os.path.join(full_sub_data_path, f), os.listdir(full_sub_data_path))\n args_list = [(on, table_file, targets, num_splits, map_columns) for table_file in data_files]\n\n pool = mp.Pool(pool_size)\n pool.map(unpack_split_file_with_map, args_list)", "def split(self):\n overall_chunks = []\n for filename in self.get_all_files():\n file_chunks = self.split_single_file(filename)\n overall_chunks.extend(file_chunks)\n return overall_chunks", "def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)", "def splitter(file_name: str, MAX_SIZE: int = 7):\n\n # convertion to MB\n MAX_SIZE = MAX_SIZE * 1024 * 1024\n\n # index go throught the bit stream\n start_index: int = 0\n\n # harvested data\n data: bytes = None\n\n created_files: int = 0\n\n with open(file_name, \"rb\") as input_stream:\n # while we didn't go out the file\n while data != b'':\n # we place the cursor at start index\n input_stream.seek(start_index)\n # read a chunk of size MAX_SIZE bytes\n data = input_stream.read(MAX_SIZE)\n\n if data == b'':\n break\n # then we open an output file\n with open(str(start_index) + \"_\" + file_name, \"wb\") as ouput_stream:\n # A write the related chunk in it\n ouput_stream.write(data)\n\n created_files += 1\n\n # we translate the cursor\n start_index += MAX_SIZE\n\n print(\"Done! 
\", created_files, \" files created\")", "def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks", "def split_batches(filenames):\n by_time = {}\n for path_name in filenames:\n file_name = path.basename(path_name)\n parsed_fn = parse_agdc_fn(file_name)\n dt = parsed_fn['datetime']\n by_time.setdefault(dt, []).append((path_name, parsed_fn))\n\n rv = list(by_time.values())\n\n for group in rv:\n # Will raise exception if group is non-homogeneous\n check_sane(parsed for _, parsed in group)\n\n return rv", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def split_standard(path: str, export_files: Sequence[str]):\n fn1 = [100, 100, 100]\n fn2 = [100, 500]\n fn3 = [100]\n fn4 = [100, 100]\n\n max_size = 500\n constants.debug_set_max_size(max_size)\n\n graph_def = test_util.make_graph_def_with_constant_nodes(\n STANDARD_SIZES, fn1=fn1, fn2=fn2, fn3=fn3, fn4=fn4\n )\n proto = saved_model_pb2.SavedModel()\n proto.meta_graphs.add().graph_def.CopyFrom(graph_def)\n\n _split_and_write(path, proto, max_size, export_files)", 
"def extract_chunks(the_files, the_bands=None):\n ds_config = {}\n gdal_ptrs = []\n datatypes = []\n for the_file in the_files:\n g = gdal.Open(the_file)\n gdal_ptrs.append(gdal.Open(the_file))\n datatypes.append(GDAL2NUMPY[g.GetRasterBand(1).DataType])\n\n block_size = g.GetRasterBand(1).GetBlockSize()\n nx = g.RasterXSize\n ny = g.RasterYSize\n if the_bands is None:\n the_bands = np.arange(g.RasterCount) + 1\n proj = g.GetProjectionRef()\n geoT = g.GetGeoTransform()\n ds_config['nx'] = nx\n ds_config['ny'] = ny\n ds_config['nb'] = g.RasterCount\n ds_config['geoT'] = geoT\n ds_config['proj'] = proj\n block_size = [block_size[0]*2, block_size[1]*2]\n print(\"Blocksize is (%d,%d)\" % (block_size[0], block_size[1]))\n # block_size = [ 256, 256 ]\n # store these numbers in variables that may change later\n nx_valid = block_size[0]\n ny_valid = block_size[1]\n # find total x and y blocks to be read\n nx_blocks = (int)((nx + block_size[0] - 1) / block_size[0])\n ny_blocks = (int)((ny + block_size[1] - 1) / block_size[1])\n buf_size = block_size[0] * block_size[1]\n ################################################################\n # start looping through blocks of data\n ################################################################\n # loop through X-lines\n for X in range(nx_blocks):\n # change the block size of the final piece\n if X == nx_blocks - 1:\n nx_valid = nx - X * block_size[0]\n buf_size = nx_valid * ny_valid\n\n # find X offset\n this_X = X * block_size[0]\n\n # reset buffer size for start of Y loop\n ny_valid = block_size[1]\n buf_size = nx_valid * ny_valid\n\n # loop through Y lines\n for Y in range(ny_blocks):\n # change the block size of the final piece\n if Y == ny_blocks - 1:\n ny_valid = ny - Y * block_size[1]\n buf_size = nx_valid * ny_valid\n\n # find Y offset\n this_Y = Y * block_size[1]\n data_in = []\n for ig, ptr in enumerate(gdal_ptrs):\n buf = ptr.ReadRaster(this_X, this_Y, nx_valid, ny_valid,\n buf_xsize=nx_valid, buf_ysize=ny_valid,\n band_list=the_bands)\n a = np.frombuffer(buf, dtype=datatypes[ig])\n data_in.append(a.reshape((\n len(the_bands), ny_valid, nx_valid)).squeeze())\n\n yield (ds_config, this_X, this_Y, nx_valid, ny_valid,\n data_in)", "def test150EventMultipleFileSplit(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.multipleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=150,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 10)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job.getFiles(type=\"lfn\")), 1)\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def split_blob(blob):\n for match in split_marc.finditer(blob):\n yield match.group()" ]
[ "0.6776535", "0.6658732", "0.6537813", "0.641983", "0.6385792", "0.6284475", "0.6230259", "0.6148328", "0.6138635", "0.61125404", "0.60882944", "0.6068976", "0.6046665", "0.60240245", "0.5992238", "0.59826624", "0.596513", "0.595883", "0.59121525", "0.5833727", "0.5827267", "0.58196497", "0.5780511", "0.5749509", "0.57488644", "0.57436967", "0.5742002", "0.57196987", "0.57101506", "0.5708861" ]
0.70029205
0
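A hedged aside: the splitter invocation stored in the document field above can be sketched with subprocess and an argument list instead of os.system, assuming ./lib/splitter.jar, a Java runtime on PATH, and the data/ output directory from the stored code (the wrapper name below is hypothetical, not part of the record):

import os
import subprocess

def split_large_osm_file_subprocess(filename, max_nodes=15000, output_dir="data"):
    # Sketch only: mirrors the flags used in the stored document.
    os.makedirs(output_dir, exist_ok=True)
    args = [
        "java", "-Xmx4000M", "-jar", "./lib/splitter.jar",
        "--output=xml",
        "--output-dir=" + output_dir,
        "--max-nodes=" + str(max_nodes),
        filename,
    ]
    # Redirect splitter output to a log file, like the "> splitter.log" redirection in the original.
    with open("splitter.log", "w") as log:
        subprocess.run(args, stdout=log, stderr=subprocess.STDOUT, check=True)

Passing the arguments as a list avoids shell quoting problems when the input filename contains spaces.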
Returns a merged sidewalk network. Takes two sidewalk networks and merges them without duplicating sidewalk data
def merge_sidewalks(sidewalk_network1, sidewalk_network2): for node in sidewalk_network1.nodes.get_list(): node.confirmed = True ''' # add new nodes from sidewalk_network2 to sidewalk_network1 for sidewalk_node in sidewalk_network2.nodes.get_list(): in_other = False same_node = None for other_sidewalk_node in sidewalk_network1.nodes.get_list(): if sidewalk_node.location() == other_sidewalk_node.location(): in_other = True same_node = other_sidewalk_node if not in_other: # If street network 2 contains the node but street network 1 does not sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1 else: # If both networks contain the node sidewalk_network2.nodes.update(sidewalk_node.id, same_node) ''' # add new nodes from sidewalk_network2 to sidewalk_network1 network1_dict = {} for sidewalk_node in sidewalk_network1.nodes.get_list(): network1_dict[sidewalk_node.location] = sidewalk_node for sidewalk_node in sidewalk_network2.nodes.get_list(): if sidewalk_node.location not in network1_dict: sidewalk_network1.add_node(sidewalk_node) else: sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location]) # add new ways from sidewalk_network2 to sidewalk_network1 for way in sidewalk_network2.ways.get_list(): # ensure all ways have correct nids, if incorrect update to correct nid from network1 for nid in way.get_node_ids(): if sidewalk_network1.nodes.get(nid) is None: way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id) has_confirmed_parents = False for nid in way.get_node_ids(): if sidewalk_network1.nodes.get(nid).confirmed: has_confirmed_parents = True if not has_confirmed_parents: sidewalk_network1.add_way(way) return sidewalk_network1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_networks_in_series(n1, n2):\n new_l_size = n1.l_size + n2.l_size + 1 # One additional vertex in between.\n new_u_size = n1.u_size + n2.u_size\n\n # Connect the 0-pole and the inf-pole in the result network.\n new_link_edge = n1.zero_pole.insert_before()\n new_link_edge_opp = n2.inf_pole.insert_after()\n new_link_edge.opposite = new_link_edge_opp\n new_link_edge_opp.opposite = new_link_edge\n\n # Merge the 0-pole of n1 with the inf-pole of n2.\n n1.inf_pole.insert_all_after(n2.zero_pole)\n\n # Remove the link edges in n1 and n2 if they are not real.\n if not n1.is_linked:\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n if not n2.is_linked:\n n2.zero_pole.remove()\n n2.inf_pole.remove()\n\n # After a serial merge the poles are never linked.\n res = Network(new_link_edge, is_linked=False, l_size=new_l_size, u_size=new_u_size)\n res.type = 'S'\n return res\n\n # # Extract the poles from both networks.\n # first_net_zero_pole_edge = n1.zero_pole\n # first_net_inf_pole_edge = n1.inf_pole\n #\n # second_net_zero_pole_edge = n2.zero_pole\n # second_net_inf_pole_edge = n2.inf_pole\n #\n # # Create a new half edges for connecting the poles of the network. The\n # # edge will not be part from the edges list.\n # new_root_half_edge = first_net_zero_pole_edge.insert_after()\n # new_root_opposite = second_net_inf_pole_edge.insert_after()\n #\n # new_root_half_edge.opposite = new_root_opposite\n # new_root_opposite.opposite = new_root_half_edge\n #\n # # Get the half edges from both networks for merging\n # first_net_inf_pole_prior = first_net_inf_pole_edge.prior\n # second_net_zero_pole_edge_prior = second_net_zero_pole_edge.prior\n #\n # # Merge the both networks so that the inf-pole from the first network is\n # # identified with the zero-pole from the second one. Handling different\n # # while merging the two networks.\n # first_net_inf_pole_edge.prior = second_net_zero_pole_edge_prior\n # second_net_zero_pole_edge_prior.next = first_net_inf_pole_edge\n #\n # first_net_inf_pole_prior.next = second_net_zero_pole_edge\n # second_net_zero_pole_edge.prior = first_net_inf_pole_prior\n #\n # # Update the node numbers in the second network zero-pole edges\n # half_edge_walker = first_net_inf_pole_prior.next\n # while half_edge_walker != first_net_inf_pole_prior:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Check whether the original poles of the network that are merged are\n # # linked or not. 
If they are not linked then the corresponding half\n # # edges between them have to be removed.\n # if not n1.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # first_net_zero_pole_edge.remove()\n # first_net_inf_pole_edge.remove()\n #\n # if not n2.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # second_net_zero_pole_edge.remove()\n # second_net_inf_pole_edge.remove()\n #\n # # After a serial merge the poles are never linked.\n # res = Network(new_root_half_edge, is_linked=False,\n # l_size=new_l_size, u_size=new_u_size)\n # res.type = 'S'\n # return res", "def merge_networks_in_parallel(n1, n2):\n # This operation is not defined if both networks are linked.\n assert not (n1.is_linked and n2.is_linked), (n1, n2)\n\n if n1.is_linked:\n return merge_networks_in_parallel(n2, n1)\n\n # Either n2 is linked and n1 not or both are not linked.\n assert not n1.is_linked\n\n new_l_size = n1.l_size + n2.l_size\n new_u_size = n1.u_size + n2.u_size\n res_is_linked = n1.is_linked or n2.is_linked\n\n # Merge 0-poles.\n n1.zero_pole.insert_all_before(n2.zero_pole.prior)\n\n # Merge inf-poles.\n n1.inf_pole.insert_all_after(n2.inf_pole.next)\n\n # Remove the link edge in n1\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n\n res = Network(n2.zero_pole, res_is_linked, new_l_size, new_u_size)\n res.type = 'P'\n return res\n\n # # Merge their 0-poles.\n # first_net_zero_pole_prior = first_net_zero_pole_edge.prior\n # second_net_zero_pole_next = second_net_zero_pole_edge.next\n # second_net_zero_pole_prior = second_net_zero_pole_edge.prior\n # first_net_zero_pole_edge.prior = second_net_zero_pole_prior\n # second_net_zero_pole_prior.next = first_net_zero_pole_edge\n # first_net_zero_pole_prior.next = second_net_zero_pole_next\n # second_net_zero_pole_next.prior = first_net_zero_pole_prior\n #\n # # Update the node numbers in the zero pole.\n # half_edge_walker = first_net_zero_pole_edge.next\n # while half_edge_walker != first_net_zero_pole_edge:\n # half_edge_walker.node_nr = first_net_zero_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n\n # # Merge their inf-poles\n # first_net_inf_pole_next = first_net_inf_pole_edge.next\n # second_net_inf_pole_prior = second_net_inf_pole_edge.prior\n # second_net_inf_pole_next = second_net_inf_pole_edge.next\n # first_net_inf_pole_edge.next = second_net_inf_pole_next\n # second_net_inf_pole_next.prior = first_net_inf_pole_edge\n # first_net_inf_pole_next.prior = second_net_inf_pole_prior\n # second_net_inf_pole_prior.next = first_net_inf_pole_next\n #\n # # Update the node numbers in the inf pole\n # half_edge_walker = first_net_inf_pole_edge.next\n # while half_edge_walker != first_net_inf_pole_edge:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g", "def merge(self, g1, g2):\n logger = logging.getLogger(__name__)\n \n \n g = BaseGraph()\n g.copy_graph_from(g1)\n\n plwn2sumo_dict = defaultdict(set)\n plwn2sumo_dict = self.get_plwn2sumo_dict()\n\n synset_on_vertex_dict = {}\n for node in 
g.all_nodes():\n synset_id = node.synset.synset_id\n if synset_id in synset_on_vertex_dict:\n logger.warning(\"ID of some synset is not unique.\")\n continue\n synset_on_vertex_dict[synset_id] = node\n\n num_of_edge = 0\n for edge in g2.all_edges():\n num_of_edge += 1\n logger.info(\"%d/%d\", num_of_edge, g2.num_edges())\n\n parent_sumo_concept = edge.source().sumo\n child_sumo_concept = edge.target().sumo\n\n if parent_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", parent_sumo_concept)\n continue\n if child_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", child_sumo_concept)\n continue\n\n for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:\n if parent_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", parent_syn_id)\n continue\n p_node = synset_on_vertex_dict[parent_syn_id]\n for child_syn_id in plwn2sumo_dict[child_sumo_concept]:\n if child_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", child_syn_id)\n continue\n ch_node = synset_on_vertex_dict[child_syn_id]\n \n g.add_edge(p_node,\n ch_node,\n [(\"rel\", edge.rel)],\n simply=True)\n \n\n return g", "def concatenate_graphs(G1, G2):\n V = G1.V + G2.V\n edges = np.vstack((G1.edges, G1.V + G2.edges))\n weights = np.hstack((G1.weights, G2.weights))\n G = WeightedGraph(V, edges, weights)\n return G", "def merge_nffgs (cls, target, new, log=logging.getLogger(\"UNION\")):\n # Copy Infras\n target = cls._copy_node_type_with_flowrules(new.infras, target, log)\n # Copy NFs\n target = cls._copy_node_type(new.nfs, target, log)\n # Copy SAPs\n target = cls._copy_node_type(new.saps, target, log)\n\n # Copy remaining links which should be valid\n for u, v, link in new.network.edges_iter(data=True):\n if not target.network.has_edge(u, v, key=link.id):\n src_port = target.network.node[u].ports[link.src.id]\n dst_port = target.network.node[v].ports[link.dst.id]\n c_link = deepcopy(link)\n c_link.src = src_port\n c_link.dst = dst_port\n target.add_link(src_port=src_port, dst_port=dst_port, link=c_link)\n log.debug(\"Copy Link: %s\" % c_link)\n return target", "def merge(cls, frame1, frame2):\n\t\treturn cls(list(set(frame1.srcList+frame2.srcList)), list(set(frame1.tgtList+frame2.tgtList)), frame1.srcTree, frame1.tgtTree)", "def side_renaming(network1, network2):\n\n # There is probably faster way to perform this, optimize later if needed\n for i in range(len(network1.nodes)):\n \n if (network1.nodes[i][\"group\"] == \"#fcae91FF\"):\n network1.nodes[i][\"T1\"] = \"0\"\n\n elif (network1.nodes[i][\"group\"] == \"#7828a0FF\"):\n network1.nodes[i][\"T1\"] = \"1\"\n \n else:\n print(\"Error with group encoding!\")\n \n \n for i in range(len(network2.nodes)):\n \n if (network2.nodes[i][\"group\"] == \"#fcae91FF\"):\n network2.nodes[i][\"T2\"] = \"0\"\n \n elif (network2.nodes[i][\"group\"] == \"#7828a0FF\"):\n network2.nodes[i][\"T2\"] = \"1\"\n \n else:\n print(\"This should not be printed! 
Error with group encoding!\")\n\n return network1, network2", "def merge_nw_nnw(self):\n nw_nnw = {}\n nw_dict = self.ontology.heirs_network_dictionary\n nnw_dict = self.model.networks_w_namednw_dict\n for label, things in nw_dict.items():\n nw_nnw[label] = things\n if label in nnw_dict.keys():\n nw_nnw[label] = set(nnw_dict[label])\n return nw_nnw", "def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result", "def testMergeNoEdges():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=2, z=3)\n\n assert n1.z == 4\n\n n1.merge_with(n2)\n\n assert n1.z == 3", "def connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids):\n # crosswalk_node_ids = crosswalk.get_node_ids()[:-1] # Crosswalk has a redundant node at the end.\n\n for crosswalk_node_id in crosswalk_node_ids[:-1]:\n try:\n # Get the intersection node and two nodes that created the intersection sidewalk node\n crosswalk_node = sidewalk_network.nodes.get(crosswalk_node_id)\n intersection_node, adjacent_street_node1, adjacent_street_node2 = crosswalk_node.parents\n\n # Connect sidewalk nodes created from adjacent_street_node1 and adjacent_street_node2\n # Get sidewalk nodes that are created from the street node, and\n # identify which one should be connected to crosswalk_node\n for adjacent_street_node in [adjacent_street_node1, adjacent_street_node2]:\n # Skip the dummy node\n if len(adjacent_street_node.get_way_ids()) == 0:\n continue\n\n # Create a vector from the intersection node to the adjacent street node.\n # Then also create a vector from the intersection node to the sidewalk node\n v_adjacent_street_node = intersection_node.vector_to(adjacent_street_node, normalize=True)\n shared_street_id = intersection_node.get_shared_way_ids(adjacent_street_node)[0]\n try:\n sidewalk_node_1_from_intersection, sidewalk_node_2_from_intersection = intersection_node.get_sidewalk_nodes(shared_street_id)\n except TypeError:\n # Todo: Issue #29. 
Sometimes shared_street_id does not exist in the intersection_node.\n log.exception(\"connect_crosswalk_nodes(): shared_street_id %s does not exist.\" % shared_street_id)\n continue\n v_sidewalk_node_1_from_intersection = intersection_node.vector_to(sidewalk_node_1_from_intersection, normalize=True)\n\n # Check which one of sidewalk_node_1_from_intersection and sidewalk_node_2_from_intersection are\n # on the same side of the road with crosswalk_node.\n # If the sign of the cross product from v_adjacent_street_node to v_crosswalk_node is same as\n # that of v_adjacent_street_node to v_sidewalk_node_1_from_intersection, then\n # sidewalk_node_1_from_intersection should be on the same side.\n # Otherwise, sidewalk_node_2_from_intersection should be on the same side with crosswalk_node.\n v_crosswalk_node = intersection_node.vector_to(crosswalk_node, normalize=True)\n if np.cross(v_adjacent_street_node, v_crosswalk_node) * np.cross(v_adjacent_street_node, v_sidewalk_node_1_from_intersection) > 0:\n node_to_swap = sidewalk_node_1_from_intersection\n else:\n node_to_swap = sidewalk_node_2_from_intersection\n\n sidewalk_network.swap_nodes(node_to_swap, crosswalk_node)\n except ValueError:\n log.exception(\"Error while connecting crosswalk nodes, so skipping...\")\n continue\n return", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def _merge_mapper(mapper1, mapper2):\n if len(mapper1) > 0:\n if len(mapper2) > 0:\n clusters1 = mapper1['cluster']\n clusters2 = mapper2['cluster']\n clusters = np.unique(np.concatenate((clusters1, clusters2), 0))\n\n mapper1['cluster'] = clusters\n mapper1['links'] += mapper2['links']\n else:\n mapper1 = mapper2\n return mapper1", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def merge(self, other):\n\n assert self.ins_addr == other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def direct_network(self):\n #print list(self.get_subgraphs())\n graphs = [self._depth_first_directed(g) for g in self.get_subgraphs()]\n self._network = reduce(lambda a, b: nx.union(a, b), graphs)", "def getTwoNodesNoConnectionGraph(self) -> LGraph:\n layer = self.makeLayer()\n self.addNodeToLayer(layer)\n self.addNodeToLayer(layer)\n return self.graph", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes", "def sub_graph_merging(self):", "def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along 
the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. \n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. 
any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)", "def build_network(spectra_to_cluster_1: dict, spectra_to_cluster_2: dict, source1: str, source2: str):\n graph = nx.Graph()\n\n # add all clusters as nodes\n graph.add_nodes_from(set(spectra_to_cluster_1.values()), source=\"source1\", source_label=source1)\n graph.add_nodes_from(set(spectra_to_cluster_2.values()), source=\"source2\", source_label=source2)\n\n # add the edges\n for spec_id_1 in spectra_to_cluster_1.keys():\n if spec_id_1 in spectra_to_cluster_2:\n cluster_1 = spectra_to_cluster_1[spec_id_1]\n cluster_2 = spectra_to_cluster_2[spec_id_1]\n\n try:\n graph.edges[cluster_1, cluster_2][\"weight\"] += 1\n except KeyError:\n graph.add_edge(cluster_1, cluster_2, weight=1)\n\n return graph", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item", "def merge_networks(network, donor=[]):\n if isinstance(donor, list):\n donors = donor\n else:\n donors = [donor]\n\n # First fix up geometries\n # main_proj = network.project\n # main_geoms = main_proj.geometries()\n for donor in donors:\n proj = donor.project\n geoms = proj.geometries().values()\n for geo in geoms:\n if geo.name in network.project.names:\n geo.name = network.project._generate_name(geo)\n network.project.append(geo)\n\n for donor in donors:\n network['pore.coords'] = np.vstack((network['pore.coords'],\n donor['pore.coords']))\n network['throat.conns'] = np.vstack((network['throat.conns'],\n donor['throat.conns']\n + network.Np))\n p_all = np.ones((np.shape(network['pore.coords'])[0],), dtype=bool)\n t_all = np.ones((np.shape(network['throat.conns'])[0],), dtype=bool)\n network.update({'pore.all': p_all})\n network.update({'throat.all': t_all})\n for key in set(network.keys()).union(set(donor.keys())):\n if key.split('.')[1] not in ['conns', 'coords', '_id', 'all']:\n if key in network.keys():\n pop_flag = False\n # If key not on donor add it first with dummy values to\n # simplify merging later\n if key not in donor.keys():\n logger.debug('Adding ' + key + ' to donor')\n if network[key].dtype == bool: # Deal with labels\n donor[key] = False\n else: # Deal with numerical data\n element = key.split('.')[0]\n shape = list(network[key].shape)\n N = donor._count(element)\n shape[0] = N\n donor[key] = np.empty(shape=shape)*np.nan\n pop_flag = True\n # Then merge it with existing array on network\n if len(network[key].shape) == 1:\n temp = np.hstack((network[key], donor[key]))\n else:\n temp = np.vstack((network[key], donor[key]))\n network[key] = temp\n if pop_flag:\n donor.pop(key, None)\n else:\n # If key not on network add it first\n logger.debug('Adding ' + key + ' to network')\n if donor[key].dtype == bool:\n network[key] = False\n else:\n data_shape = list(donor[key].shape)\n pore_prop = True if key.split(\".\")[0] == \"pore\" else False\n data_shape[0] = network.Np if pore_prop else network.Nt\n network[key] = np.empty(data_shape) * np.nan\n # Then append donor values to network\n s = 
np.shape(donor[key])[0]\n network[key][-s:] = donor[key]\n\n # Clear adjacency and incidence matrices which will be out of date now\n network._am.clear()\n network._im.clear()", "def GenDumbbellGraph(n1, n2):\n G = nx.complete_graph(n1)\n H = nx.complete_graph(n2)\n\n mapping = {}\n for i in range(n2):\n mapping[i] = i+n1\n H = nx.relabel_nodes(H, mapping=mapping)\n\n I = nx.union(G,H)\n I.add_edge(n1-1,n1)\n I.weighted = False\n #set weight to 1\n for e in I.edges_iter():\n I.add_edge(e[0],e[1], weight = 1)\n\n print(I.number_of_edges())\n print(I.number_of_nodes())\n \n print(I.edges());\n #Draw(I);\n return I", "def _build_graph2(self, g1):\n g2 = g1.copy()\n for source, target, weight in self._remaining_edges:\n if weight == -1:\n self._gt_edges.append((source, target))\n if g2.has_edge(source, target):\n g2.remove_edge(source, target)\n return g2", "def mergeNodes(nodeA,nodeB,branchLenA,branchLenB):\n newNodeA = (nodeA[0], nodeA[1], nodeA[2], branchLenA)\n newNodeB = (nodeB[0], nodeB[1], nodeB[2], branchLenB)\n new = ('anc',newNodeA,newNodeB,0)\n return new", "def test_merge_outer_multipolygon_way_2():\n data = cache_query(ways=[16001, 16002], deps=True)\n assert data['ways']['16001']['relations'].keys() == ['16001']\n assert data['ways']['16002'] == None\n\n data = cache_query(relations=[16001], full=True)\n assert sorted(data['relations']['16001']['ways'].keys()) == ['16001', '16011']\n\n assert query_row(db_conf, 'osm_landusages', 16001) == None\n park_16001 = query_row(db_conf, 'osm_landusages', -16001)\n assert park_16001['type'] == 'park'\n assert_almost_equal(park_16001['geometry'].area, 12779350582, -1)\n assert query_row(db_conf, 'osm_roads', 16002) == None", "def merge(left, right):\n\n # Create a new lined list that contains nodes from merging left and right\n merged = LinkedList()\n\n # Add a fale head that is discarded later\n merged.add(0)\n\n # set current to the head of the linked list\n current = merged.head\n\n # Obtain head nodes for left and right linked lists\n left_head = left.head\n right_head = right.head\n\n # Iterate over left and right until we reach the tail of either\n while left_head or right_head:\n # if the head node of left is None, we're past the tail\n # Add the noded from right to merged list\n if left_head is None:\n current.next_node = right_head\n # set next on right to set loop condition to False\n right_head = right_head.next_node\n\n # if the head node of right is None, we're past the tail\n # Add the noded from left to merged list\n elif right_head is None:\n current.next_node = left_head\n # set next on left to set loop condition to False\n left_head = left_head.next_node\n else:\n # Not at either tail node\n # Obtain node data to to perforrm comparison operations\n left_data = left_head.data\n right_data = right_head.data\n\n # if data on left is less than right, set current to left node\n if left_data < right_data:\n current.next_node = left_head\n # Move left head to next node\n left_head = left_head.next_node\n # if data on left is greater than right, set current to right node\n else:\n current.next_node = right_head\n # Move right head to next node\n right_head = right_head.next_node\n\n # Move current to next node\n current = current.next_node\n\n # Discard fake head and set first merged mode as head\n head = merged.head.next_node\n merged.head = head\n\n return merged" ]
[ "0.6948328", "0.6779855", "0.6415078", "0.6316667", "0.62008923", "0.6062278", "0.6058839", "0.6043561", "0.6039173", "0.6013152", "0.59606683", "0.59541595", "0.59236413", "0.5908109", "0.59027153", "0.5885178", "0.5791493", "0.57893294", "0.57688636", "0.57563233", "0.57294315", "0.57175994", "0.56919634", "0.56870115", "0.56773716", "0.5617074", "0.56027746", "0.5588543", "0.55773664", "0.5574376" ]
0.7956663
0
Convert a Java object of `SparseVectorWrapper` to a scipy sparse matrix whose number of rows is 1. `j_obj.getSize()` must return a positive number.
def j_sparse_vector_wrapper_to_scipy_spmatrix(j_obj: JavaObject): indices = np.frombuffer(j_obj.getIndicesBytes(), dtype="<i4") values = np.frombuffer(j_obj.getValuesBytes(), dtype="<f8") size = j_obj.getSize() indptr = np.array([0, indices.shape[0]], dtype=np.int32) return csr_matrix((values, indices, indptr), shape=(1, size), dtype=np.float64).todok()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_sparse(self):\n from divisi2.sparse import SparseVector\n return SparseVector(self, self.labels)", "def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)", "def sparse_matrix(data, stype=\"csr\", dtype=complex):\n return _SPARSE_CONSTRUCTORS[stype](data, dtype=dtype)", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())", "def ZSparseMatrix2Scipy(matrix):\n data = np.ndarray(matrix.get_value_size(), dtype=float);\n outer_idx = np.ndarray(matrix.get_outer_size(), dtype=np.int32);\n inner_idx = np.ndarray(matrix.get_inner_size(), dtype=np.int32);\n\n matrix.get_values(data);\n matrix.get_outer_indices(outer_idx);\n matrix.get_inner_indices(inner_idx);\n\n return scipy.sparse.csc_matrix((data, inner_idx, outer_idx),\n shape = (matrix.num_rows(), matrix.num_cols()),\n dtype = float);", "def _build_sparse_matrix(L):\n shape = L.shape\n i = torch.LongTensor(np.vstack((L.row, L.col)).astype(int))\n v = torch.FloatTensor(L.data)\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))", "def test_00_create_sparse_1d_array(self):\n ncells = 100\n sparsity = 3.0 # 1 / density\n _, err = _iquery(\"create array SPARSE <v:int64>[i=0:{0}:0:5]\".format(\n ncells - 1))\n assert not err, err\n self._array_cleanups.append('SPARSE')\n _, err = _iquery(\"\"\"\n insert(\n redimension(\n apply(\n build(<i:int64>[fud=0:{0}], {1}*fud),\n (v, 1)),\n SPARSE),\n SPARSE)\"\"\".format(int(ncells / sparsity) - 1,\n int(sparsity)))\n assert not err, err\n check_v_sum('SPARSE')\n nchunks = chunk_count(vaid_of('SPARSE'))\n prt(\"SPARSE has\", nchunks, \"chunks\")", "def sparse_matlab(i, j, v, m, n):\n return csr_matrix((v, (i, j)), shape=(m, n))", "def sparse_matrix(shape, integer=False):\n dtype = numpy.int_ if integer else numpy.float_\n return scipy.sparse.lil_matrix(shape, dtype=dtype)", "def tocsr(self):\n\n indptr = np.asarray([len(x) for x in self.rows], dtype=np.intc)\n indptr = np.concatenate( (np.array([0], dtype=np.intc), np.cumsum(indptr)) )\n\n nnz = indptr[-1]\n\n indices = []\n for x in self.rows:\n indices.extend(x)\n indices = np.asarray(indices, dtype=np.intc)\n\n data = []\n for x in self.data:\n data.extend(x)\n data = np.asarray(data, dtype=self.dtype)\n\n from csr import csr_matrix\n return csr_matrix((data, indices, indptr), shape=self.shape)", "def _convert_to_csr(ref_handle, destroy_original=False):\n\n csr_ref = sparse_matrix_t()\n ret_val = MKL._mkl_sparse_convert_csr(ref_handle, _ctypes.c_int(10), _ctypes.byref(csr_ref))\n\n try:\n _check_return_value(ret_val, \"mkl_sparse_convert_csr\")\n except ValueError:\n try:\n _destroy_mkl_handle(csr_ref)\n except ValueError:\n pass\n\n raise\n\n if destroy_original:\n _destroy_mkl_handle(ref_handle)\n\n return csr_ref", "def make_sparse(sparse_mx, args):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n\n indices = tensor(np.vstack((sparse_mx.row, sparse_mx.col)), args, torch.long)\n values = tensor(sparse_mx.data, args)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def to_csr(self):\n return sparse.csr_matrix((self.data, (self.col, self.row)),\n shape=(self.nrows, self.ncols))", "def scipy_sparse_to_spmatrix(A):\n coo = 
A.tocoo()\n SP = spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape)\n return SP", "def _identity_sparse(d, stype=\"csr\", dtype=complex):\n return sp.eye(d, dtype=dtype, format=stype)", "def test_01_sparse_to_dataframe(self):\n # Flatten it...\n _, err = _iquery(\"store(flatten(SPARSE, _fast:1), DF1)\")\n assert not err, err\n self._array_cleanups.append('DF1')\n check_v_sum('DF1')\n self._df1_chunks = chunk_count(vaid_of(\"DF1\"))\n prt(\"DF1 has\", self._df1_chunks, \"chunks\")", "def scipy2numpy(scipy_obj):\n\n scipy_mat = scipy_obj.mat()\n size = scipy_mat.getSize()\n np_mat = np.zeros(size)\n\n for i in range(size[0]):\n for j in range(size[1]):\n np_mat[i,j] = scipy_mat.getValue(i,j)\n\n return np_mat", "def to_s_matrix(w,v):\n pass", "def to_sparse(x):\n x_typename = torch.typename(x).split('.')[-1]\n sparse_tensortype = getattr(torch.sparse, x_typename)\n\n indices = torch.nonzero(x)\n if len(indices.shape) == 0: # if all elements are zeros\n return sparse_tensortype(*x.shape)\n indices = indices.t()\n values = x[tuple(indices[i] for i in range(indices.shape[0]))]\n return sparse_tensortype(indices, values, x.size())", "def getSparse(self): # as opposed to makeSparse which keeps the same form and return nothing\n return copy.deepcopy(self.makeSparse())", "def test_csr_from_coo():\n\n from scipy.sparse import random\n\n m = 1000\n n = 500\n\n rng = numpy.random.default_rng(0)\n scipy_coo_mat = random(m, n, format=\"coo\", random_state=rng)\n scipy_csr_mat = scipy_coo_mat.tocsr()\n csr_mat = spmv.CsrMatrix.from_coo(\n scipy_coo_mat.row, scipy_coo_mat.col, scipy_coo_mat.data, (m, n)\n )\n\n vec = rng.normal(size=n)\n\n expected = scipy_csr_mat @ vec\n actual = csr_mat @ vec\n\n numpy.testing.assert_allclose(actual, expected)", "def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def test_import_sparse_values_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.mat').toarray())", "def get_cvxopt_sparse_intf():\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()", "def test_csr_matvec():\n\n from scipy.sparse import random\n\n m = 1000\n n = 500\n\n rng = numpy.random.default_rng(0)\n scipy_mat = random(m, n, format=\"csr\", random_state=rng)\n csr_mat = spmv.CsrMatrix(\n scipy_mat.data, scipy_mat.indices, scipy_mat.indptr, (m, n)\n )\n\n vec = rng.normal(size=n)\n\n expected = scipy_mat @ vec\n actual = csr_mat @ vec\n\n numpy.testing.assert_allclose(actual, expected)", "def sparse_matrix (base_type=float):\n return defaultdict (lambda: sparse_vector (base_type))", "def to_coo_matrix(self):\n row_indices, column_indices, nonzero_elements = self.to_ijv()\n return coo_matrix((nonzero_elements, (row_indices, column_indices)),\n shape=(self.size, self.size))", "def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type=\"float\"):\n if a_ndarray is None:\n return None\n invalidInputError(isinstance(a_ndarray, np.ndarray),\n f\"input should be a np.ndarray, not ${type(a_ndarray)}\")\n invalidInputError(isinstance(i_ndarray, np.ndarray),\n f\"indices should be a np.ndarray, not ${type(i_ndarray)}\")\n invalidInputError(i_ndarray.size == a_ndarray.size * shape.size,\n f\"size of values ${a_ndarray.size * shape.size} and\"\n f\" indices ${i_ndarray.size} should match\")\n return cls(a_ndarray,\n shape,\n 
bigdl_type,\n i_ndarray)", "def _dict_to_sparse(matrix_dict):\n return scipy.sparse.coo_matrix(\n (matrix_dict['data'], (matrix_dict['row'], matrix_dict['col'])),\n shape=matrix_dict['shape'])" ]
[ "0.6758371", "0.6584369", "0.63413817", "0.62148833", "0.6176313", "0.61322516", "0.61192065", "0.6099706", "0.60937256", "0.60803705", "0.5934155", "0.59088725", "0.5851765", "0.5839966", "0.58354145", "0.58263737", "0.58221644", "0.58093333", "0.5790453", "0.5756694", "0.5673294", "0.5671317", "0.56489956", "0.56209385", "0.56131834", "0.55949914", "0.55775136", "0.5575111", "0.5552646", "0.5551623" ]
0.84431887
0
User not logged in returns 403
def test_not_logged_in(self): response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13}) self.assertEqual(response.status_code, 403)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def before_request():\n if g.current_user.is_anonymous:\n return forbidden('Not signed in')\n\n if not g.current_user.confirmed:\n return forbidden('Unconfirmed account')", "def get_authenticated_denied(self):", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def user_logged_in():\n if not session.get('user_id'):\n return \"nope\", 401\n else:\n return \"yep\", 200", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def check_user_and_login(self) -> Response:\n pass", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def test_login_required(self):\n self.client.logout()\n response = self.client.post(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def profile_unlogged():\n cookie = {'session_id': None}\n response = requests.get(f'{URL}/profile', cookies=cookie)\n assert response.status_code == 403", "def test_profile_api_anon(self):\n self.client.logout()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 403)", "def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return self.login()", "def login_required_403(view):\n @wraps(view)\n def dec_view(request, *args, **kwargs):\n 
if not request.user.is_authenticated():\n return JsonResponse({\"detail\": \"You have to log in\"}, status=403)\n\n return view(request, *args, **kwargs)\n\n return dec_view", "def test_user_get_profile_not_authorized(self):\n self.client.logout()\n response = self.client.get(CONSTS.USER_PROFILE_URL)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)" ]
[ "0.75391126", "0.75391126", "0.75391126", "0.75391126", "0.745583", "0.729318", "0.72895944", "0.728383", "0.7196082", "0.7116251", "0.7104749", "0.7040694", "0.7040694", "0.70018977", "0.70018977", "0.70018977", "0.691351", "0.69067097", "0.68914956", "0.6850118", "0.681733", "0.68127215", "0.67939454", "0.67914516", "0.67658204", "0.6749728", "0.67048717", "0.6699602", "0.6691799", "0.6678031" ]
0.7585612
0
The same point is not submitted twice
def test_same_point_not_submitted(self): self.c.force_login(self.u) data = {'lat': 34.0, 'lng': 45.3, 'zoom': 13} response = self.c.get(reverse(submit_point), data) response = self.c.get(reverse(submit_point), data) response = self.c.get(reverse(submit_point), data) response = self.c.get(reverse(submit_point), data) response = self.c.get(reverse(submit_point), data) response = self.c.get(reverse(submit_point), data) response = self.c.get(reverse(submit_point), data) self.assertEqual(Point.objects.count(), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_duplicate_question(self):\n\n self.post_question()\n return self.post_question()", "def post_duplicate_meetup(self):\n\n self.post_meetup()\n return self.post_meetup()", "def test_create_single_poll_submission(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def ok(self, point):\n return True", "def ok(self, point):\n return True", "def __check_for_duplicates(self, point) -> bool:\n # Check all already published (active) dirt objects (stored and received from the goal_list)\n for dirt in self.active_dirt_list:\n if self.__comparing_points(point, dirt.pose.position):\n return True\n return False", "def onpick(cls, event):\n if cls.rate_limiting():\n return True\n\n if len(event.ind) != 1:\n print(\"Two or more points are too close! Please zoom in.\")\n print(\"Showing the one with higher fitness score\")\n\n cloud_plot = gs.canvas2cloud_plot[event.canvas]\n artist = event.artist\n ind = event.ind[-1]\n button = event.mouseevent.button\n\n if button == 1:\n cls.button_1(cloud_plot, artist, ind)\n elif button == 3:\n cls.button_3(cloud_plot, artist, ind)", "def validate(self) -> None:\n points = self._points\n if not points:\n raise ValueError('Multipoint is empty.')\n elif len(points) > len(self._points_set):\n raise ValueError('Duplicate points found.')\n for point in points:\n point.validate()", "def postPoint(request, Form):\n\tform = Form(request.POST)\n\tform.data = form.data.copy()\n\n\t# Convert coords to valid geometry\n\ttry:\n\t\tform.data['geom'] = normalizeGeometry(form.data['geom'])\n\texcept(ValueError):\n\t\t# TODO provide error message to user here\n\t\tJsonResponse({'success': False})\n\t\t# messages.error(request, '<strong>' + _('Error') + '</strong><br>' + _('No point was selected for this type of report.'))\n\n\t# Validate and submit to db\n\tif form.is_valid():\n\t\tpoint = form.save()\n\t\t# Errors with push notifications should not affect reporting\n\t\tif not settings.DEBUG:\n\t\t\ttry: pushNotification.pushNotification(point)\n\t\t\texcept: pass\n\n\t\treturn JsonResponse({\n\t\t\t'success': True,\n\t\t\t'point': GeoJSONSerializer().serialize([point,]),\n\t\t\t'point_type': point.p_type,\n\t\t\t'form_html': render_crispy_form(Form())\n\t\t})\n\telse:\n\t\tlogger.debug(\"Form not valid\")\n\n\t# Else: error occurred\n\tform.data['geom'] = form.data['geom'].json\n\tform_html = render_crispy_form(form)\n\treturn JsonResponse({'success': False, 'form_html': form_html})", "def IsInsertedPoint(self, ):\n ...", "def check_duplicate(self, state):\n pass", "def buildsetSubmitted(buildset):", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def test_post_duplicate_question(self):\n self.post_question(self.valid_question2)\n\n\n response = self.post_question(self.valid_question2)\n self.assertEqual(response.status_code, 400)", "def _post_submit(self):\n if not isinstance(self._snapshots, bool):\n self.snapshot(self._snapshots)\n\n self.update(True)", "def form_valid(self, form):\n\n timestamp = datetime.datetime.now()\n title = form.cleaned_data['title']\n tagnames = form.cleaned_data['tags']\n text = form.cleaned_data['text']\n in_nomine = 
form.cleaned_data['in_nomine']\n total_threshold = int(form.cleaned_data['threshold'])\n\n question = self.request.user.post_question(\n title = title,\n body_text = text,\n tags = tagnames,\n wiki = False,\n is_anonymous = False,\n timestamp = timestamp\n )\n\n action = question.action \n\n # check if this action is created by an organization\n if in_nomine[:3] == \"org\":\n in_nomine_pk = int(in_nomine[4:])\n log.debug(\"IN_NOMINE %s _PK %s\" % (in_nomine[:3], in_nomine[4:]))\n action.in_nomine_org = Organization.objects.get(pk=in_nomine_pk)\n # We update \"in_nomine_org\" and no other action parameters below,\n # so it is safe and good to \"save\" here.\n action.save()\n\n categories = form.cleaned_data['category_set']\n action.category_set.add(*categories)\n\n geoname_data = form.cleaned_data['geoname_set']\n #COMMENT WARNING TODO: we should have a specific GeonameField\n # which has a clean() method that does the following kludge\n # The same is valid for other m2m fields below\n geonames = self.get_or_create_geonames(geoname_data)\n action.geoname_set.add(*geonames)\n \n politician_data = form.cleaned_data['politician_set']\n politicians = self.get_or_create_politicians(politician_data)\n action.politician_set.add(*politicians)\n \n medias = form.cleaned_data['media_set']\n #TODO: Matteo \n action.media_set.add(*medias)\n\n success_url = action.get_absolute_url()\n return redirect(success_url)", "def post(self):" ]
[ "0.6164863", "0.5824898", "0.57879406", "0.5720838", "0.5720838", "0.5641985", "0.5631077", "0.5560973", "0.55347097", "0.54725015", "0.5429235", "0.5414906", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5264571", "0.5263165", "0.52591825", "0.5240543", "0.52342653" ]
0.7431294
0
Return thumbnail for image.
def get_thumbnail(img_path): thumb = JPEGImage(unicode(img_path)).exif_thumbnail.as_blob() if thumb: logger.debug("Using EXIF thumbnail for {0}".format(img_path)) return thumb else: logger.debug("Generating thumbnail for {0}".format(img_path)) return scale_image(unicode(img_path), width=160)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible('image'):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get('image', None)\n if image_conf:\n scaleconf = image_conf['imgsize']\n # Scale string is something like: 'mini 200:200'.\n # We need the name only: 'mini'.\n scale = scaleconf.split(' ')[0]\n scales = ploneapi.content.get(path='@@images')\n return scales.scale('image', scale)", "def thumbnail(self):\n\n if self._thumbnail is None:\n cover = self.cover()\n\n if cover is not None:\n self._thumbnail = cover.resize(THUMBNAIL_SIZE, Image.ANTIALIAS)\n\n return self._thumbnail", "def thumbnail(self, img_path):\n\n thumb_path = self.thumbnail_path(img_path)\n\n if os.path.exists(thumb_path):\n return thumb_path\n else:\n self.queue_thumbnail(img_path)\n return None", "def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible(\"image\"):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get(\"image\", None)\n if image_conf:\n scaleconf = image_conf[\"imgsize\"]\n # scale string is something like: 'mini 200:200' and\n # we need the name only: 'mini'\n if scaleconf == \"_original\":\n scale = None\n else:\n scale = scaleconf.split(\" \")[0]\n scales = item.restrictedTraverse(\"@@images\")\n return scales.scale(\"image\", scale)", "def get_thumbnail(self, size):\n\n thumb = self.associated_images[b'thumbnail']\n return thumb", "def thumbnail(self):\n return self._thumbnail", "def thumbnail(self):\n return self.get_thumbnail_url()", "def get_thumbnail_url():", "def thumbnail(im, config):\n\n im.thumbnail(\n (config['width'], config['height']),\n ANTIALIAS,\n )\n\n return im", "def thumbnail_source(self):\n try:\n return self.chunks['thumbnail_source']\n except KeyError:\n # No thumbnail_source so return the image instead. 
For single-scale\n # we use the image as the thumbnail_source.\n return self.chunks.get('image')", "def resize_image(image, size=(926, 617)):\n\n im = Image.open(image)\n im.convert('RGB')\n im.thumbnail(size)\n thumb_io = BytesIO()\n im.save(thumb_io, 'JPEG', quality=85)\n thumbnail = File(thumb_io, name=image.name)\n return thumbnail", "def make_thumbnail(image, size=(100, 100)):\n logging.debug(image)\n\n im = create_colorblind_image(image)\n\n thumb_io = BytesIO() # create a BytesIO object\n\n im.save(thumb_io, 'PNG', quality=85) # save image to BytesIO object\n\n thumbnail = File(thumb_io, name=image.name) # create a django friendly File object\n\n return thumbnail", "def GetThumbnail(self, type, maxsize): # real signature unknown; restored from __doc__\n pass", "def _get_thumbnail_image_from_file(dir_path, image_file):\n # Get image\n img = _get_image_from_file(dir_path, image_file)\n # If it's not supported, exit now\n if img is None:\n return None\n if img.format.lower() == 'gif':\n return None\n # Get image dimensions\n img_width, img_height = img.size\n # We need to perform a resize - first, work out the scale ratio to take the\n # image width to args.width (args.width:img_width ratio)\n scale_ratio = args.width / float(img_width)\n # Work out target image height based on the scale ratio\n target_height = int(scale_ratio * img_height)\n # Perform the resize\n try:\n img.thumbnail((args.width, target_height), resample=RESAMPLE)\n except IOError as exptn:\n print('WARNING: IOError when thumbnailing %s/%s: %s' % (\n dir_path, image_file, exptn\n ))\n return None\n # Return the resized image\n return img", "def get_thumbnail(format):", "def thumbnail(self, width, height, path, **kwargs):\n return self.get('fileops/thumbnail', api='CONV', params={\n 'root': self.root,\n 'path': path,\n 'width': width,\n 'height': height,\n }, **kwargs)", "def get_possible_thumbnail(self):\n meta = self.get_meta_data()\n print meta\n if \"og:image\" in meta:\n return meta[\"og:image\"]\n elif \"twitter:image:src\" in meta:\n return meta[\"twitter:image:src\"]\n else:\n images = self.get_image_data()\n temp_url = \"\"\n temp_width = 0\n for img in images:\n if img[\"image_width\"] > temp_width:\n temp_url = img[\"image_url\"]\n temp_width = img[\"image_width\"]\n\n return temp_url", "def mosaic_thumbnail(self):\n serial = slugify(self.request.matchdict[\"serial\"])\n filename = \"thumbnails/%s/mosaic.png\" % serial\n return FileResponse(filename)", "def get_thumbnail_url(self):\n return self.thumbnail_url", "def make_thumbnail(self):\n # https://gist.github.com/valberg/2429288\n\n # make sure image data is set\n if not self.image_data:\n return False\n\n if self.proxy_data:\n return True\n\n # Create a resized version of the image\n image = Image.open(self.image_data)\n image.thumbnail(THUMBNAIL_SIZE, Image.BICUBIC)\n\n # Save the thumbnail to in-memory 'file'\n temp_thumb = BytesIO()\n image.save(temp_thumb, 'jpeg')\n temp_thumb.seek(0) # rewinds the file\n\n # Save image to a SimpleUploadFile which can be saved\n # into ImageField\n # TODO figure out how to pass base image's UUID before\n # image is committed to DB\n basename = os.path.basename(self.image_data.name)\n uuidname = os.path.splitext(basename)[0]\n suf = SimpleUploadedFile(uuidname,\n temp_thumb.read(), content_type='image/jpeg')\n thumb_filename = '{}_thumb.jpeg'.format(suf.name)\n\n # set save=False, or else it will infinite loop\n self.proxy_data.save(thumb_filename,\n suf,\n save=False)\n\n # Also store the real dimensions for the 
Pillow thumbnail\n self.proxy_width, self.proxy_height = image.size\n\n temp_thumb.close()\n\n return True", "def getThumbnail(self):\n logger.debug(\"Func: getThumbnail\")\n\n return os.path.join(self.projectDir, self._currentThumbFile)", "def img_url_thumbnail(self):\n url = '%s=s%s-c' % (self.img_url, self.THUMBNAIL_SIZE_PX)\n if self.img_rot in Plaque.ALLOWED_ROTATIONS:\n url = \"%s-r%s\" % (url, self.img_rot)\n return url", "def resize(img):\n size = (500, 500)\n img.thumbnail(size)\n return img", "def get_thumbnail_name(self, thumbnail_name, with_size=None):", "def thumbnail_url(self):\n return None", "def get_thumbnail(self):\n try:\n app1_segment = self._segments['APP1']\n except KeyError:\n thumbnail_hex_string = None\n else:\n thumbnail_hex_string = app1_segment.thumbnail_hex_string\n\n if not thumbnail_hex_string:\n raise RuntimeError(\"image does not contain thumbnail\")\n\n return binascii.unhexlify(thumbnail_hex_string)", "def get_thumbnail_url(self, image_url):\n\n return settings.THUMBNAILER_URL + image_url", "def top_thumbnail(self):\n serial = slugify(self.request.matchdict[\"serial\"])\n filename = \"thumbnails/%s/top.png\" % serial\n return FileResponse(filename)", "def setThumbnailImage(*args):", "def generateThumbnail(img):\n\n if not img._thumbfn:\n return\n\n aimgfn = join(opts.root, img._filename)\n if not opts.fast:\n img._size = imageSize(aimgfn)\n\n athumbfn = join(opts.root, img._thumbfn)\n\n if opts.thumb_force:\n if opts.quiet: print \"forced regeneration of '%s'\" % img._thumbfn\n elif not exists(athumbfn):\n if opts.quiet: print \"thumbnail absent '%s'\" % img._thumbfn\n else:\n # Check if thumbsize has changed\n if not opts.fast:\n img._thumbsize = imageSize(athumbfn)\n if not checkThumbSize(img._size, \\\n img._thumbsize, \\\n opts.thumb_size):\n if opts.quiet: print \"thumbnail '%s size has changed\" % img._thumbfn\n try:\n # Clear cache for thumbnail size.\n del imageSizeCache[ athumbfn ]\n except:\n pass\n else:\n# pass\n# if opts.quiet: print \"thumbnail '%s' already generated (size ok)\" \\\n# % img._thumbfn\n return\n else:\n if opts.quiet: print \"thumbnail '%s' already generated\" % img._thumbfn\n return\n\n if opts.no_magick:\n if opts.quiet: print \"ImageMagick tools disabled, can't create thumbnail\"\n return\n\n # create necessary directories\n d = dirname(athumbfn)\n if not exists(d):\n os.makedirs(d)\n\n if opts.pil:\n\n try:\n im = PilImage.open(aimgfn)\n im.thumbnail((opts.thumb_size, opts.thumb_size), config.Thumbnails[\"Interpolation\"])\n im.save(athumbfn)\n\n img._thumbsize = im.size\n except IOError, e:\n raise SystemExit(\\\n \"Error: identifying file '%s'\" % aimgfn + str(e))\n\n else:\n\n cmd = getMagickProg('convert') + ' -border 2x2 '\n # FIXME check if this is a problem if not specified\n #cmd += '-interlace NONE '\n\n cmd += '-geometry %dx%d ' % (opts.thumb_size, opts.thumb_size)\n\n if opts.thumb_quality:\n cmd += '-quality %d ' % opts.thumb_quality\n\n # This doesn't add text into the picture itself, just the comment in\n # the header.\n if opts.copyright:\n cmd += '-comment \\\"%s\\\" ' % opts.copyright\n\n # We use [1] to extract the thumbnail when there is one.\n # It is harmless otherwise.\n subimg = \"\"\n if img._ext.lower() in [ \".jpg\", \".tif\", \".tiff\" ]:\n subimg = \"[1]\"\n\n cmd += '\"%s%s\" \"%s\"' % (aimgfn, subimg, athumbfn)\n\n if opts.quiet: print \"generating thumbnail '%s'\" % img._thumbfn\n\n (chin, chout, cherr) = os.popen3(cmd)\n errs = cherr.readlines()\n chout.close()\n cherr.close()\n if errs:\n 
print >> sys.stderr, \\\n \"Error: running convert program on %s:\" % aimgfn\n errs = string.join(errs, '\\n')\n print errs\n\n if subimg and \\\n re.compile('Unable to read subimage').search(errs):\n if opts.quiet: print \"retrying without subimage\"\n cmd = string.replace(cmd, subimg, \"\")\n\n (chin, chout, cherr) = os.popen3(cmd)\n errs = cherr.readlines()\n chout.close()\n cherr.close()\n if errs:\n print >> sys.stderr, \\\n \"Error: running convert program on %s:\" % aimgfn\n print string.join(errs, '\\n')\n\n else:\n img._thumbsize = imageSize(athumbfn)" ]
[ "0.7674613", "0.76600677", "0.7653746", "0.7650969", "0.75755334", "0.7431497", "0.73779297", "0.71744597", "0.71693736", "0.71072376", "0.7059326", "0.705206", "0.7002036", "0.6974647", "0.695208", "0.69503266", "0.68614525", "0.6830077", "0.68119925", "0.6811585", "0.6805786", "0.67553765", "0.6747184", "0.6705275", "0.6670691", "0.6667246", "0.6608435", "0.6556853", "0.65559316", "0.65498286" ]
0.8027751
0
Context Manager that mounts the first available partition on a USB drive, yields its path and then unmounts it.
def mount_stick(stick): mount = stick.get_dbus_method( "FilesystemMount", dbus_interface="org.freedesktop.UDisks.Device") path = mount('', []) try: yield path except Exception as e: raise e finally: unmount = stick.get_dbus_method( "FilesystemUnmount", dbus_interface="org.freedesktop.UDisks.Device") unmount([], timeout=1e6) # dbus-python doesn't know an infinite # timeout... unmounting sometimes takes a # long time, since the device has to be # synced.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_mountpoint(mount_point):\n execute('mount', mount_point, sudo=True)\n yield\n execute('umount', mount_point, sudo=True)", "def device_mounted(uuid):\n out, err = run_cmd(['lsblk', '-o', 'NAME,UUID,MOUNTPOINT', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('children', blkdevice):\n for child in blkdevice['children']:\n if key_exists('mountpoint', child) and child['uuid'] == uuid:\n return child['mountpoint']", "def _wipe(self):\n log_method_call(self, self.name, status=self.status)\n\n start = self.partedPartition.geometry.start\n part_len = self.partedPartition.geometry.end - start\n bs = self.partedPartition.geometry.device.sectorSize\n device = self.partedPartition.geometry.device.path\n\n # Erase 1MiB or to end of partition\n count = int(Size(\"1 MiB\") / bs)\n count = min(count, part_len)\n\n cmd = [\"dd\", \"if=/dev/zero\", \"of=%s\" % device, \"bs=%s\" % bs,\n \"seek=%s\" % start, \"count=%s\" % count]\n try:\n util.run_program(cmd)\n except OSError as e:\n log.error(str(e))\n finally:\n # If a udev device is created with the watch option, then\n # a change uevent is synthesized and we need to wait for\n # things to settle.\n udev.settle()", "def _mount_stick_shell(stick):\n out = subprocess.check_output(\"udisks --mount {0}\"\n .format(stick).split())\n path = re.match(r\"Mounted .* at (.*)\", out).group(1)\n try:\n yield path\n except Exception as e:\n raise e\n finally:\n subprocess.check_output(\"udisks --unmount {0}\"\n .format(stick).split())", "def mounted(source, dest=None, opts=None, fs_type=None,\n mount_attempts=1, umount_attempts=3):\n params = []\n if opts:\n params.extend(['-o', ','.join(opts)])\n if fs_type:\n params.extend(['-t', fs_type])\n\n if dest is None:\n dest = tempfile.mkdtemp()\n clean_up = True\n else:\n clean_up = False\n\n mounted = False\n try:\n execute(\"mount\", source, dest, *params, run_as_root=True,\n attempts=mount_attempts, delay_on_retry=True)\n mounted = True\n yield dest\n finally:\n if mounted:\n try:\n execute(\"umount\", dest, run_as_root=True,\n attempts=umount_attempts, delay_on_retry=True)\n except (EnvironmentError,\n processutils.ProcessExecutionError) as exc:\n LOG.warning(\n 'Unable to unmount temporary location %(dest)s: %(err)s',\n {'dest': dest, 'err': exc})\n # NOTE(dtantsur): don't try to remove a still mounted location\n clean_up = False\n\n if clean_up:\n try:\n shutil.rmtree(dest)\n except EnvironmentError as exc:\n LOG.warning(\n 'Unable to remove temporary location %(dest)s: %(err)s',\n {'dest': dest, 'err': exc})", "def wait_for_disk_to_become_available(device):\n pids = ['']\n stderr = ['']\n interval = CONF.disk_partitioner.check_device_interval\n max_retries = CONF.disk_partitioner.check_device_max_retries\n\n def _wait_for_disk():\n # A regex is likely overkill here, but variations in fuser\n # means we should likely use it.\n fuser_pids_re = re.compile(r'\\d+')\n\n # There are 'psmisc' and 'busybox' versions of the 'fuser' program. The\n # 'fuser' programs differ in how they output data to stderr. The\n # busybox version does not output the filename to stderr, while the\n # standard 'psmisc' version does output the filename to stderr. 
How\n # they output to stdout is almost identical in that only the PIDs are\n # output to stdout, with the 'psmisc' version adding a leading space\n # character to the list of PIDs.\n try:\n # NOTE(ifarkas): fuser returns a non-zero return code if none of\n # the specified files is accessed.\n # NOTE(TheJulia): fuser does not report LVM devices as in use\n # unless the LVM device-mapper device is the\n # device that is directly polled.\n # NOTE(TheJulia): The -m flag allows fuser to reveal data about\n # mounted filesystems, which should be considered\n # busy/locked. That being said, it is not used\n # because busybox fuser has a different behavior.\n # NOTE(TheJuia): fuser outputs a list of found PIDs to stdout.\n # All other text is returned via stderr, and the\n # output to a terminal is merged as a result.\n out, err = execute('fuser', device, check_exit_code=[0, 1],\n run_as_root=True)\n\n if not out and not err:\n return True\n\n stderr[0] = err\n # NOTE: findall() returns a list of matches, or an empty list if no\n # matches\n pids[0] = fuser_pids_re.findall(out)\n\n except processutils.ProcessExecutionError as exc:\n LOG.warning('Failed to check the device %(device)s with fuser:'\n ' %(err)s', {'device': device, 'err': exc})\n return False\n\n retry = tenacity.retry(\n retry=tenacity.retry_if_result(lambda r: not r),\n stop=tenacity.stop_after_attempt(max_retries),\n wait=tenacity.wait_fixed(interval),\n reraise=True)\n try:\n retry(_wait_for_disk)()\n except tenacity.RetryError:\n if pids[0]:\n raise exception.IronicException(\n _('Processes with the following PIDs are holding '\n 'device %(device)s: %(pids)s. '\n 'Timed out waiting for completion.')\n % {'device': device, 'pids': ', '.join(pids[0])})\n else:\n raise exception.IronicException(\n _('Fuser exited with \"%(fuser_err)s\" while checking '\n 'locks for device %(device)s. 
Timed out waiting for '\n 'completion.')\n % {'device': device, 'fuser_err': stderr[0]})", "def find_dev_mount_point(self, usb_table):\n mounts = open(\"/proc/mounts\")\n mount_lines = mounts.readlines()\n table = usb_table\n i = 0\n for device in table:\n for line in mount_lines:\n arguments = line.split(\" \")\n if arguments[0] == device[0]:\n usb_table[i].append(arguments[1])\n usb_table[i] = self.get_drive_stat(usb_table[i])\n break\n i += 1\n return usb_table", "def unmount(mount_point):\n # type: (str) -> None\n\n p = subprocess.Popen(['umount', mount_point], stderr=subprocess.PIPE)\n out, err = p.communicate()\n if p.returncode > 0:\n raise OSError(err)\n else:\n os.rmdir(mount_point)", "def unlocked_device(crypto_device):\n execute('cryptdisks_start', crypto_device, sudo=True)\n yield\n execute('cryptdisks_stop', crypto_device, sudo=True)", "def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')", "def mount_single(partition_size, drives):\n for drive_list in drives:\n if are_equal(drive_list, partition_size):\n for drive_info, partition_info in zip(drive_list, partition_size):\n mount_pattern = \"mount -t ntfs -o uid=1000,gid=1000,umask=0002 /dev/{} {}\"\n mount_cmd = mount_pattern.format(drive_info[1], partition_info[1])\n print(mount_cmd)", "def eject(mountpoint=DMG_MOUNT, silent=False, dry_run=ARGS.dry_run):\n if not isinstance(mountpoint, Path):\n mountpoint = Path(mountpoint)\n\n cmd = ['/usr/bin/hdiutil', 'eject', str(mountpoint)]\n\n if not dry_run and not mountpoint.exists():\n LOG.warning('Cannot unmount {mountpoint} - it does not exist'.format(mountpoint=mountpoint))\n elif not dry_run and mountpoint.exists():\n _p = subprocess.run(cmd, capture_output=True, encoding='utf-8')\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n if not silent:\n LOG.info('Unmounted {mountpoint}'.format(mountpoint=mountpoint))\n else:\n LOG.debug('Error: '. 
_p.stderr.strip() if _p.stderr else _p.stdout.strip())\n elif ARGS.dry_run and not dry_run:\n LOG.warning('Unmount {mountpoint}'.format(mountpoint=mountpoint))", "def do_mount(devpath, mountpoint, fstype):\n try:\n if check_already_mounted(devpath, mountpoint):\n return\n\n mounter = Mounter()\n mounter.mount(devpath, mountpoint, fstype)\n except exceptions.MountException:\n try:\n mounter.make_filesystem(devpath, fstype)\n mounter.mount(devpath, mountpoint, fstype)\n except exceptions.FuxiException as e:\n with excutils.save_and_reraise_exception():\n LOG.error(str(e))", "def unmap(self, ignore_fail=False):\n run_cmd('kpartx', '-d', self.filename, ignore_fail=ignore_fail)\n for part in self.partitions:\n self.mapdev = None", "def mount_device(uuid):\n mount_point = f'/mnt/{uuid}/back-up'\n # Create mountpoint if it doesn't exist\n pathlib.Path(mount_point).mkdir(parents=True, exist_ok=True)\n\n # Mount device\n out, err = run_cmd(['mount', '--uuid', uuid, mount_point])\n\n if not err:\n return mount_point\n else:\n abort(err, cause='mount')", "def umount(vol_name, mountpoint, instance):\n with __manager__.get_state() as mount_state:\n mount_state.umount(vol_name, mountpoint, instance)", "def mount(self, mount_point):\n log.debug(\"Mounting {0} for {1}\".format(mount_point, self.fs.get_full_name()))\n for counter in range(30):\n if self.status == volume_status.ATTACHED:\n if os.path.exists(mount_point):\n # Check if the mount location is empty\n if len(os.listdir(mount_point)) != 0:\n log.warning(\"Mount point {0} already exists and is not \"\n \"empty!? ({2}) Will attempt to mount volume {1}\"\n .format(mount_point, self.volume_id,\n os.listdir(mount_point)))\n # return False\n else:\n log.debug(\"Creating mount point directory {0} for {1}\"\n .format(mount_point, self.fs.get_full_name()))\n try:\n os.mkdir(mount_point)\n except Exception, e:\n log.warning(\"Could not create {0} mount point {1}: {2}\"\n .format(self.fs.get_full_name(), mount_point, e))\n # Potentially wait for the device to actually become available in the system\n # TODO: Do something if the device is not available in the\n # given time period\n for i in range(10):\n if os.path.exists(self.device):\n log.debug(\"Device path {0} checked and it exists.\".format(\n self.device))\n break\n else:\n log.debug(\"Device path {0} does not yet exist; waiting...\".format(\n self.device))\n time.sleep(4)\n # Until the underlying issue is fixed (see FIXME below), mask this\n # even more by custom-handling the run command and thus not\n # printing the err\n cmd = '/bin/mount %s %s' % (self.device, mount_point)\n try:\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n _, _ = process.communicate()\n if process.returncode != 0:\n # FIXME: Assume if a file system cannot be mounted that it's because\n # there is not a file system on the device so try creating\n # one\n if run('/sbin/mkfs.xfs %s' % self.device,\n \"Failed to create a file system on device %s\" % self.device,\n \"Created a file system on device %s\" % self.device):\n if not run(\n '/bin/mount %s %s' % (self.device, mount_point),\n \"Error mounting file system %s from %s\" % (\n mount_point, self.device),\n \"Successfully mounted file system %s from %s\" %\n (mount_point, self.device)):\n log.error(\"Failed to mount device '%s' to mount point '%s'\"\n % (self.device, mount_point))\n return False\n # Resize the volume if it was created from a snapshot\n else:\n if self.snapshot and self.volume.size > self.snapshot.volume_size:\n 
run('/usr/sbin/xfs_growfs %s' % mount_point)\n log.info(\n \"Successfully grew file system {0}\".format(self.fs.get_full_name()))\n except Exception, e:\n log.error(\"Exception mounting {0} at {1}\".format(\n self.fs.get_full_name(), mount_point))\n return False\n try:\n # Default owner of all mounted file systems to `galaxy`\n # user\n os.chown(mount_point, pwd.getpwnam(\n \"galaxy\")[2], grp.getgrnam(\"galaxy\")[2])\n # Add Galaxy- and CloudBioLinux-required files under the\n # 'data' dir\n if ServiceRole.GALAXY_DATA in self.fs.svc_roles:\n for sd in ['files', 'tmp', 'upload_store', 'export']:\n path = os.path.join(\n self.app.path_resolver.galaxy_data, sd)\n if not os.path.exists(path):\n os.mkdir(path)\n # Make 'export' dir that's shared over NFS be\n # owned by `ubuntu` user so it's accesible\n # for use to the rest of the cluster\n if sd == 'export':\n os.chown(path, pwd.getpwnam(\n \"ubuntu\")[2], grp.getgrnam(\"ubuntu\")[2])\n else:\n os.chown(path, pwd.getpwnam(\n \"galaxy\")[2], grp.getgrnam(\"galaxy\")[2])\n except OSError, e:\n log.debug(\n \"Tried making 'galaxyData' sub-dirs but failed: %s\" % e)\n # If based on an archive, extract archive contents to the mount point\n if self.from_archive:\n # Do not overwrite an existing dir structure w/ the archive\n # content. This happens when a cluster is rebooted.\n if self.fs.name == 'galaxy' and \\\n os.path.exists(self.app.path_resolver.galaxy_home):\n log.debug(\"Galaxy home dir ({0}) already exists; not \"\n \"extracting the archive ({1}) so not to \"\n \"overwrite it.\".format(self.app.path_resolver.galaxy_home,\n self.from_archive['url']))\n self.fs.nfs_share_and_set_state()\n else:\n self.fs.state = service_states.CONFIGURING\n # Extract the FS archive in a separate thread\n ExtractArchive(self.from_archive['url'], mount_point,\n self.from_archive['md5_sum'],\n callback=self.fs.nfs_share_and_set_state).run()\n else:\n self.fs.nfs_share_and_set_state()\n return True\n else:\n log.warning(\"Cannot mount volume '%s' in state '%s'. 
Waiting \"\n \"(%s/30).\" % (self.volume_id, self.status, counter))\n time.sleep(2)", "def detachDiskFromMinipad(self , disk):\n return", "def mpt():\n lbl_drives = ['device','mountpoint','fstype']\n disks = [d[0:3] for d in psutil.disk_partitions()]\n drives = [dict(zip(lbl_drives,ds)) for ds in disks]\n return [d['mountpoint']for d in drives]", "def _umount_with_detach(entry_path):\n try:\n fs_linux.umount_filesystem(entry_path)\n except OSError as err:\n _LOGGER.warning('Failed to umount %s: %s',\n entry_path, err)\n # 16 means device busy\n if err.errno == 16:\n try:\n fs_linux.umount_filesystem(entry_path, lazy=True)\n except OSError as err:\n _LOGGER.warning('Failed to lazy umount %s: %s',\n entry_path, err)", "def isMounted(device):\n for _device, _path in getMounted():\n if device == _device:\n return _path\n return ''", "def medium_partition():\n try:\n usb_partitions = sort_partitions()\n usb_partitions.pop(0) # remove smallest\n usb_partitions.pop(len(usb_partitions) - 1) # remove largest\n except IndexError:\n print(\"Not enough USB devices available\")\n exit(1)\n else:\n return str(usb_partitions[0][0])", "def get_disk_by_mountpoint(mnt_point):\n diskparts = psutil.disk_partitions()\n for item in diskparts:\n if item.mountpoint == mnt_point:\n return realpath(item.device)\n return None", "def is_held(dev):\n assert os.path.exists(dev)\n dev = os.path.realpath(dev)\n base = get_dev_name(dev)\n\n # full disk?\n directory = '/sys/block/{base}/holders'.format(base=base)\n if os.path.exists(directory):\n return os.listdir(directory)\n\n # partition?\n part = base\n while len(base):\n directory = '/sys/block/{base}/{part}/holders'.format(part=part, base=base)\n if os.path.exists(directory):\n return os.listdir(directory)\n base = base[:-1]\n return []", "def mount(mapping, loaded_data):\n for drive_size, partition_infos in mapping:\n mount_single(partition_infos, loaded_data[drive_size])", "def mount(fstype, export, vol_name, mountpoint, instance, options=None):\n with __manager__.get_state() as mount_state:\n mount_state.mount(fstype, export, vol_name, mountpoint, instance,\n options)", "def _load_disk(self):\r\n pass", "def reconstruct(self):\n volumes = list(sorted((v for v in self.get_volumes() if v.mountpoint and v.lastmountpoint),\n key=lambda v: v.mountpoint or \"\", reverse=True))\n\n try:\n root = list(filter(lambda x: x.lastmountpoint == '/', volumes))[0]\n except IndexError:\n self._debug(\"[-] Could not find / while reconstructing, aborting!\")\n return None\n\n volumes.remove(root)\n\n for v in volumes:\n v.bindmount(os.path.join(root.mountpoint, v.lastmountpoint[1:]))\n return root", "def mount_single_volume(self):\n\n for disk in self.disks:\n self._debug(\" Mounting volumes in {0}\".format(disk))\n for volume in disk.mount_single_volume():\n yield volume", "def mount(self, fstype, export, vol_name, mountpoint, instance, options):\n\n # NOTE(mdbooth): mount() may currently be called multiple times for a\n # single attachment. 
Any operation which calls\n # LibvirtDriver._hard_reboot will re-attach volumes which are probably\n # already attached, resulting in multiple mount calls.\n\n LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '\n 'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '\n 'options=%(options)s) generation %(gen)s',\n {'fstype': fstype, 'export': export, 'vol_name': vol_name,\n 'mountpoint': mountpoint, 'options': options,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n if os.path.ismount(mountpoint):\n LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '\n 'mountpoint already mounted'),\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n else:\n LOG.debug('Mounting %(mountpoint)s generation %(gen)s',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n\n fileutils.ensure_tree(mountpoint)\n\n try:\n nova.privsep.fs.mount(fstype, export, mountpoint, options)\n except processutils.ProcessExecutionError:\n # Check to see if mountpoint is mounted despite the error\n # eg it was already mounted\n if os.path.ismount(mountpoint):\n # We're not going to raise the exception because we're\n # in the desired state anyway. However, this is still\n # unusual so we'll log it.\n LOG.exception(\n 'Error mounting %(fstypes export %(export)s on '\n '%(mountpoint)s. Continuing because mountpount is '\n 'mounted despite this.',\n {'fstype': fstype, 'export': export,\n 'mountpoint': mountpoint}, instance=instance)\n else:\n # If the mount failed there's no reason for us to keep\n # a record of it. It will be created again if the\n # caller retries.\n\n # Delete while holding lock\n del self.mountpoints[mountpoint]\n\n raise\n\n mount.add_attachment(vol_name, instance.uuid)\n\n LOG.debug('_HostMountState.mount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)" ]
[ "0.70899236", "0.6207645", "0.61623937", "0.6131135", "0.6095027", "0.59806234", "0.58773124", "0.5866817", "0.58360296", "0.5835725", "0.5802551", "0.5792832", "0.57846195", "0.57844996", "0.57720214", "0.5739339", "0.5727825", "0.57082254", "0.5678425", "0.5647393", "0.5639581", "0.5615241", "0.55848414", "0.55769366", "0.55696744", "0.5539024", "0.55324244", "0.55212396", "0.55176044", "0.5510421" ]
0.62446415
1